Merge tag 'mac80211-next-for-davem-2015-08-14' of mac80211-next.git
author     Kalle Valo <kvalo@codeaurora.org>
           Tue, 18 Aug 2015 05:44:22 +0000 (08:44 +0300)
committer  Kalle Valo <kvalo@codeaurora.org>
           Tue, 18 Aug 2015 05:44:22 +0000 (08:44 +0300)
iwlwifi needs new mac80211 patches, so merge mac80211-next.git into
wireless-drivers-next.git.
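
A merge like this can be reproduced locally along the following lines; this is
only a sketch, and the remote name and URL are assumptions for illustration,
not taken from this page:

    # add the mac80211-next tree, fetch the signed tag, and merge it
    git remote add mac80211-next git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
    git fetch mac80211-next tag mac80211-next-for-davem-2015-08-14
    git merge mac80211-next-for-davem-2015-08-14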

1582 files changed:
.mailmap
Documentation/ABI/testing/sysfs-bus-iio
Documentation/DocBook/drm.tmpl
Documentation/arm/SPEAr/overview.txt
Documentation/device-mapper/cache.txt
Documentation/device-mapper/thin-provisioning.txt
Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt
Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
Documentation/devicetree/bindings/net/ethernet.txt
Documentation/devicetree/bindings/net/keystone-netcp.txt
Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt [new file with mode: 0644]
Documentation/devicetree/bindings/sound/mt8173-max98090.txt
Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
Documentation/devicetree/bindings/spi/spi-ath79.txt
Documentation/hwmon/nct7904
Documentation/kbuild/makefiles.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/stmmac.txt
Documentation/networking/switchdev.txt
Documentation/networking/timestamping.txt
Documentation/target/tcm_mod_builder.py
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/include/asm/Kbuild
arch/alpha/include/asm/mm-arch-hooks.h [deleted file]
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/boot/dts/axc003.dtsi
arch/arc/boot/dts/axc003_idu.dtsi
arch/arc/include/asm/Kbuild
arch/arc/include/asm/bitops.h
arch/arc/include/asm/futex.h
arch/arc/include/asm/mm-arch-hooks.h [deleted file]
arch/arc/include/asm/ptrace.h
arch/arc/kernel/intc-arcv2.c
arch/arc/kernel/intc-compact.c
arch/arc/kernel/mcip.c
arch/arc/kernel/setup.c
arch/arc/kernel/troubleshoot.c
arch/arc/mm/cache.c
arch/arc/mm/dma.c
arch/arm/boot/dts/am335x-pepper.dts
arch/arm/boot/dts/cros-ec-keyboard.dtsi
arch/arm/boot/dts/imx23.dtsi
arch/arm/boot/dts/imx25-pdk.dts
arch/arm/boot/dts/imx27.dtsi
arch/arm/boot/dts/imx51-apf51dev.dts
arch/arm/boot/dts/imx53-ard.dts
arch/arm/boot/dts/imx53-m53evk.dts
arch/arm/boot/dts/imx53-qsb-common.dtsi
arch/arm/boot/dts/imx53-smd.dts
arch/arm/boot/dts/imx53-tqma53.dtsi
arch/arm/boot/dts/imx53-tx53.dtsi
arch/arm/boot/dts/imx53-voipac-bsb.dts
arch/arm/boot/dts/imx6dl-riotboard.dts
arch/arm/boot/dts/imx6q-arm2.dts
arch/arm/boot/dts/imx6q-gk802.dts
arch/arm/boot/dts/imx6q-tbs2910.dts
arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
arch/arm/boot/dts/imx6qdl-rex.dtsi
arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
arch/arm/boot/dts/imx6qdl-sabresd.dtsi
arch/arm/boot/dts/imx6qdl-tx6.dtsi
arch/arm/boot/dts/imx6qdl-wandboard.dtsi
arch/arm/boot/dts/imx6sl-evk.dts
arch/arm/boot/dts/imx6sx-sabreauto.dts
arch/arm/boot/dts/imx6sx-sdb.dtsi
arch/arm/boot/dts/imx7d-sdb.dts
arch/arm/boot/dts/k2e.dtsi
arch/arm/boot/dts/keystone.dtsi
arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
arch/arm/boot/dts/omap3-overo-common-lcd43.dtsi
arch/arm/boot/dts/omap4.dtsi
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
arch/arm/boot/dts/spear1310-evb.dts
arch/arm/boot/dts/spear1310.dtsi
arch/arm/boot/dts/spear1340-evb.dts
arch/arm/boot/dts/spear1340.dtsi
arch/arm/boot/dts/spear13xx.dtsi
arch/arm/boot/dts/spear300-evb.dts
arch/arm/boot/dts/spear300.dtsi
arch/arm/boot/dts/spear310-evb.dts
arch/arm/boot/dts/spear310.dtsi
arch/arm/boot/dts/spear320-evb.dts
arch/arm/boot/dts/spear320.dtsi
arch/arm/boot/dts/spear3xx.dtsi
arch/arm/boot/dts/ste-ccu8540.dts
arch/arm/boot/dts/ste-ccu9540.dts
arch/arm/boot/dts/ste-dbx5x0.dtsi
arch/arm/boot/dts/ste-href.dtsi
arch/arm/boot/dts/ste-hrefprev60-stuib.dts
arch/arm/boot/dts/ste-hrefprev60-tvk.dts
arch/arm/boot/dts/ste-hrefprev60.dtsi
arch/arm/boot/dts/ste-hrefv60plus-stuib.dts
arch/arm/boot/dts/ste-hrefv60plus-tvk.dts
arch/arm/boot/dts/ste-hrefv60plus.dtsi
arch/arm/boot/dts/ste-snowball.dts
arch/arm/include/asm/Kbuild
arch/arm/include/asm/memory.h
arch/arm/include/asm/mm-arch-hooks.h [deleted file]
arch/arm/kernel/perf_event.c
arch/arm/kernel/reboot.c
arch/arm/mach-imx/gpc.c
arch/arm/mach-omap2/Kconfig
arch/arm/mach-pxa/capc7117.c
arch/arm/mach-pxa/cm-x2xx.c
arch/arm/mach-pxa/cm-x300.c
arch/arm/mach-pxa/colibri-pxa270.c
arch/arm/mach-pxa/em-x270.c
arch/arm/mach-pxa/icontrol.c
arch/arm/mach-pxa/trizeps4.c
arch/arm/mach-pxa/vpac270.c
arch/arm/mach-pxa/zeus.c
arch/arm/mach-spear/generic.h
arch/arm/mach-spear/include/mach/irqs.h
arch/arm/mach-spear/include/mach/misc_regs.h
arch/arm/mach-spear/include/mach/spear.h
arch/arm/mach-spear/include/mach/uncompress.h
arch/arm/mach-spear/pl080.c
arch/arm/mach-spear/pl080.h
arch/arm/mach-spear/restart.c
arch/arm/mach-spear/spear1310.c
arch/arm/mach-spear/spear1340.c
arch/arm/mach-spear/spear13xx.c
arch/arm/mach-spear/spear300.c
arch/arm/mach-spear/spear310.c
arch/arm/mach-spear/spear320.c
arch/arm/mach-spear/spear3xx.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/proc-v7.S
arch/arm/net/bpf_jit_32.c
arch/arm/net/bpf_jit_32.h
arch/arm64/include/asm/Kbuild
arch/arm64/include/asm/mm-arch-hooks.h [deleted file]
arch/arm64/kernel/efi.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/irq.c
arch/avr32/include/asm/Kbuild
arch/avr32/include/asm/mm-arch-hooks.h [deleted file]
arch/avr32/kernel/time.c
arch/avr32/mach-at32ap/clock.c
arch/blackfin/include/asm/Kbuild
arch/blackfin/include/asm/mm-arch-hooks.h [deleted file]
arch/c6x/include/asm/Kbuild
arch/c6x/include/asm/mm-arch-hooks.h [deleted file]
arch/cris/include/asm/Kbuild
arch/cris/include/asm/mm-arch-hooks.h [deleted file]
arch/frv/include/asm/Kbuild
arch/frv/include/asm/mm-arch-hooks.h [deleted file]
arch/h8300/include/asm/Kbuild
arch/hexagon/include/asm/Kbuild
arch/hexagon/include/asm/mm-arch-hooks.h [deleted file]
arch/ia64/include/asm/Kbuild
arch/ia64/include/asm/mm-arch-hooks.h [deleted file]
arch/m32r/include/asm/Kbuild
arch/m32r/include/asm/io.h
arch/m32r/include/asm/mm-arch-hooks.h [deleted file]
arch/m68k/Kconfig.cpu
arch/m68k/configs/m5208evb_defconfig
arch/m68k/configs/m5249evb_defconfig
arch/m68k/configs/m5272c3_defconfig
arch/m68k/configs/m5275evb_defconfig
arch/m68k/configs/m5307c3_defconfig
arch/m68k/configs/m5407c3_defconfig
arch/m68k/configs/m5475evb_defconfig
arch/m68k/include/asm/Kbuild
arch/m68k/include/asm/coldfire.h
arch/m68k/include/asm/io_mm.h
arch/m68k/include/asm/mm-arch-hooks.h [deleted file]
arch/metag/include/asm/Kbuild
arch/metag/include/asm/mm-arch-hooks.h [deleted file]
arch/microblaze/include/asm/Kbuild
arch/microblaze/include/asm/mm-arch-hooks.h [deleted file]
arch/mips/Kconfig
arch/mips/Makefile
arch/mips/include/asm/Kbuild
arch/mips/include/asm/fpu.h
arch/mips/include/asm/mach-sibyte/war.h
arch/mips/include/asm/mm-arch-hooks.h [deleted file]
arch/mips/include/uapi/asm/sigcontext.h
arch/mips/kernel/asm-offsets.c
arch/mips/sibyte/Kconfig
arch/mips/sibyte/common/bus_watcher.c
arch/mips/sibyte/sb1250/setup.c
arch/mn10300/include/asm/Kbuild
arch/mn10300/include/asm/mm-arch-hooks.h [deleted file]
arch/nios2/include/asm/Kbuild
arch/nios2/include/asm/mm-arch-hooks.h [deleted file]
arch/openrisc/Kconfig
arch/openrisc/include/asm/Kbuild
arch/openrisc/include/asm/mm-arch-hooks.h [deleted file]
arch/parisc/include/asm/Kbuild
arch/parisc/include/asm/mm-arch-hooks.h [deleted file]
arch/parisc/include/asm/pgalloc.h
arch/s390/include/asm/Kbuild
arch/s390/include/asm/ctl_reg.h
arch/s390/include/asm/hugetlb.h
arch/s390/include/asm/mm-arch-hooks.h [deleted file]
arch/s390/include/asm/page.h
arch/s390/include/asm/perf_event.h
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/cache.c
arch/s390/kernel/entry.S
arch/s390/kernel/nmi.c
arch/s390/kernel/process.c
arch/s390/kernel/sclp.S
arch/s390/kernel/setup.c
arch/s390/kernel/traps.c
arch/s390/mm/pgtable.c
arch/s390/net/bpf_jit.h
arch/s390/net/bpf_jit_comp.c
arch/s390/oprofile/init.c
arch/score/include/asm/Kbuild
arch/score/include/asm/mm-arch-hooks.h [deleted file]
arch/sh/include/asm/Kbuild
arch/sh/include/asm/mm-arch-hooks.h [deleted file]
arch/sparc/include/asm/Kbuild
arch/sparc/include/asm/mm-arch-hooks.h [deleted file]
arch/sparc/net/bpf_jit_comp.c
arch/tile/include/asm/Kbuild
arch/tile/include/asm/mm-arch-hooks.h [deleted file]
arch/tile/kernel/setup.c
arch/um/include/asm/Kbuild
arch/um/include/asm/mm-arch-hooks.h [deleted file]
arch/unicore32/include/asm/Kbuild
arch/unicore32/include/asm/mm-arch-hooks.h [deleted file]
arch/x86/Kconfig
arch/x86/Kconfig.debug
arch/x86/entry/entry_64.S
arch/x86/entry/entry_64_compat.S
arch/x86/include/asm/Kbuild
arch/x86/include/asm/fpu/types.h
arch/x86/include/asm/intel_pmc_ipc.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mm-arch-hooks.h [deleted file]
arch/x86/include/asm/mmu_context.h
arch/x86/include/asm/processor.h
arch/x86/include/uapi/asm/hyperv.h
arch/x86/include/uapi/asm/kvm.h
arch/x86/kernel/cpu/perf_event_intel_cqm.c
arch/x86/kernel/fpu/init.c
arch/x86/kernel/nmi.c
arch/x86/kernel/process.c
arch/x86/kernel/smpboot.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/iommu.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/mtrr.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/mm/ioremap.c
arch/x86/mm/mmap.c
arch/x86/mm/mpx.c
arch/x86/mm/tlb.c
arch/x86/net/bpf_jit_comp.c
arch/xtensa/include/asm/Kbuild
arch/xtensa/include/asm/mm-arch-hooks.h [deleted file]
block/bio-integrity.c
block/bio.c
block/blk-cgroup.c
block/blk-core.c
block/blk-mq.c
drivers/acpi/device_pm.c
drivers/acpi/resource.c
drivers/ata/libata-core.c
drivers/ata/libata-pmp.c
drivers/ata/libata-scsi.c
drivers/ata/libata-transport.c
drivers/ata/pata_arasan_cf.c
drivers/bcma/bcma_private.h
drivers/bcma/main.c
drivers/block/null_blk.c
drivers/block/nvme-core.c
drivers/bluetooth/Kconfig
drivers/bluetooth/bfusb.c
drivers/bluetooth/bt3c_cs.c
drivers/bluetooth/btbcm.c
drivers/bluetooth/btintel.c
drivers/bluetooth/btintel.h
drivers/bluetooth/btmrvl_drv.h
drivers/bluetooth/btusb.c
drivers/bluetooth/dtl1_cs.c
drivers/bluetooth/hci_h5.c
drivers/bluetooth/hci_intel.c
drivers/bluetooth/hci_ldisc.c
drivers/bluetooth/hci_uart.h
drivers/char/tpm/tpm-chip.c
drivers/char/tpm/tpm_crb.c
drivers/clk/spear/clk-aux-synth.c
drivers/clk/spear/clk-frac-synth.c
drivers/clk/spear/clk-gpt-synth.c
drivers/clk/spear/clk-vco-pll.c
drivers/clk/spear/clk.c
drivers/clk/spear/clk.h
drivers/clk/spear/spear1310_clock.c
drivers/clk/spear/spear1340_clock.c
drivers/clk/spear/spear3xx_clock.c
drivers/clk/spear/spear6xx_clock.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/freq_table.c
drivers/cpufreq/intel_pstate.c
drivers/cpuidle/cpuidle.c
drivers/dma/dw/core.c
drivers/firmware/efi/cper.c
drivers/gpio/gpio-brcmstb.c
drivers/gpio/gpio-davinci.c
drivers/gpio/gpio-max732x.c
drivers/gpio/gpio-omap.c
drivers/gpio/gpio-pca953x.c
drivers/gpio/gpio-xilinx.c
drivers/gpio/gpio-zynq.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/cz_dpm.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/armada/armada_crtc.c
drivers/gpu/drm/armada/armada_gem.c
drivers/gpu/drm/armada/armada_overlay.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_ioc32.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/i915_ioc32.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/imx/imx-tve.c
drivers/gpu/drm/imx/parallel-display.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
drivers/gpu/drm/msm/msm_atomic.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_prime.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_platform.c
drivers/gpu/drm/nouveau/nouveau_ttm.c
drivers/gpu/drm/nouveau/nv04_fbcon.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nv50_fbcon.c
drivers/gpu/drm/nouveau/nvc0_fbcon.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/ci_dpm.c
drivers/gpu/drm/radeon/dce6_afmt.c
drivers/gpu/drm/radeon/radeon_audio.c
drivers/gpu/drm/radeon/radeon_audio.h
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
drivers/gpu/drm/rockchip/rockchip_drm_fb.c
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
drivers/gpu/ipu-v3/ipu-common.c
drivers/hid/hid-apple.c
drivers/hid/hid-core.c
drivers/hid/hid-cp2112.c
drivers/hid/hid-ids.h
drivers/hid/hid-multitouch.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.c
drivers/hwmon/nct7802.c
drivers/hwmon/nct7904.c
drivers/iio/accel/bmc150-accel.c
drivers/iio/accel/mma8452.c
drivers/iio/adc/Kconfig
drivers/iio/adc/at91_adc.c
drivers/iio/adc/mcp320x.c
drivers/iio/adc/rockchip_saradc.c
drivers/iio/adc/twl4030-madc.c
drivers/iio/adc/vf610_adc.c
drivers/iio/common/hid-sensors/hid-sensor-trigger.c
drivers/iio/dac/ad5624r_spi.c
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
drivers/iio/light/Kconfig
drivers/iio/light/cm3323.c
drivers/iio/light/ltr501.c
drivers/iio/light/stk3310.c
drivers/iio/light/tcs3414.c
drivers/iio/magnetometer/Kconfig
drivers/iio/magnetometer/bmc150_magn.c
drivers/iio/magnetometer/mmc35240.c
drivers/iio/proximity/sx9500.c
drivers/iio/temperature/mlx90614.c
drivers/iio/temperature/tmp006.c
drivers/infiniband/core/agent.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/iwpm_msg.c
drivers/infiniband/core/iwpm_util.c
drivers/infiniband/core/iwpm_util.h
drivers/infiniband/core/mad.c
drivers/infiniband/core/multicast.c
drivers/infiniband/core/opa_smi.h
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/smi.c
drivers/infiniband/core/smi.h
drivers/infiniband/core/sysfs.c
drivers/infiniband/core/ucm.c
drivers/infiniband/core/ucma.c
drivers/infiniband/hw/cxgb3/iwch_provider.c
drivers/infiniband/hw/ehca/ehca_sqp.c
drivers/infiniband/hw/ipath/ipath_driver.c
drivers/infiniband/hw/ipath/ipath_mad.c
drivers/infiniband/hw/ipath/ipath_verbs.c
drivers/infiniband/hw/mlx4/cq.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx5/mad.c
drivers/infiniband/hw/mthca/mthca_mad.c
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_hw.c
drivers/infiniband/hw/ocrdma/ocrdma.h
drivers/infiniband/hw/ocrdma/ocrdma_abi.h
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
drivers/infiniband/hw/ocrdma/ocrdma_ah.h
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
drivers/infiniband/hw/ocrdma/ocrdma_hw.h
drivers/infiniband/hw/ocrdma/ocrdma_main.c
drivers/infiniband/hw/ocrdma/ocrdma_sli.h
drivers/infiniband/hw/ocrdma/ocrdma_stats.c
drivers/infiniband/hw/ocrdma/ocrdma_stats.h
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
drivers/infiniband/hw/qib/qib_mad.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_verbs.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/input/input-leds.c
drivers/input/mouse/bcm5974.c
drivers/input/mouse/elantech.c
drivers/input/mouse/synaptics.c
drivers/input/touchscreen/goodix.c
drivers/input/touchscreen/usbtouchscreen.c
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_v2.c
drivers/iommu/arm-smmu-v3.c
drivers/iommu/intel-iommu.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/spear-shirq.c
drivers/isdn/gigaset/ser-gigaset.c
drivers/md/Kconfig
drivers/md/bcache/closure.h
drivers/md/bcache/io.c
drivers/md/bcache/journal.c
drivers/md/bcache/request.c
drivers/md/bitmap.c
drivers/md/dm-cache-policy-smq.c
drivers/md/dm-cache-target.c
drivers/md/dm-thin.c
drivers/md/dm.c
drivers/md/md-cluster.c
drivers/md/md-cluster.h
drivers/md/md.c
drivers/md/persistent-data/dm-btree-remove.c
drivers/md/persistent-data/dm-btree.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/md/raid5.h
drivers/media/pci/ivtv/ivtvfb.c
drivers/mfd/stmpe-i2c.c
drivers/mfd/stmpe-spi.c
drivers/misc/mei/main.c
drivers/misc/mic/scif/scif_nodeqp.c
drivers/mmc/card/block.c
drivers/mmc/host/Kconfig
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci-esdhc.h
drivers/mmc/host/sdhci-pxav3.c
drivers/mmc/host/sdhci-spear.c
drivers/mmc/host/sdhci.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_netlink.c
drivers/net/bonding/bond_options.c
drivers/net/bonding/bond_sysfs.c
drivers/net/can/at91_can.c
drivers/net/can/bfin_can.c
drivers/net/can/cc770/cc770.c
drivers/net/can/flexcan.c
drivers/net/can/grcan.c
drivers/net/can/sja1000/sja1000.c
drivers/net/can/slcan.c
drivers/net/can/spi/mcp251x.c
drivers/net/can/ti_hecc.c
drivers/net/can/usb/ems_usb.c
drivers/net/can/usb/esd_usb2.c
drivers/net/can/usb/peak_usb/pcan_usb.c
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
drivers/net/can/usb/usb_8dev.c
drivers/net/dsa/Kconfig
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/mv88e6352.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/dsa/mv88e6xxx.h
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/broadcom/sb1250-mac.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/cavium/Kconfig
drivers/net/ethernet/cavium/thunder/nic.h
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.h
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/l2t.c
drivers/net/ethernet/chelsio/cxgb4/l2t.h
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
drivers/net/ethernet/cisco/enic/enic_ethtool.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/ec_bhf.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar.h
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/hisilicon/hip04_eth.c
drivers/net/ethernet/hisilicon/hip04_mdio.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/ibm/ibmveth.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_dcb.h
drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_diag.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_fcoe.c
drivers/net/ethernet/intel/i40e/i40e_fcoe.h
drivers/net/ethernet/intel/i40e/i40e_hmc.c
drivers/net/ethernet/intel/i40e/i40e_hmc.h
drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_nvm.c
drivers/net/ethernet/intel/i40e/i40e_prototype.h
drivers/net/ethernet/intel/i40e/i40e_ptp.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40evf/i40e_common.c
drivers/net/ethernet/intel/i40evf/i40e_hmc.h
drivers/net/ethernet/intel/i40evf/i40e_prototype.h
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.h
drivers/net/ethernet/intel/i40evf/i40e_type.h
drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
drivers/net/ethernet/intel/i40evf/i40evf.h
drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/e1000_phy.c
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbevf/defines.h
drivers/net/ethernet/intel/ixgbevf/ethtool.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/Kconfig
drivers/net/ethernet/mellanox/Makefile
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/fw.h
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlx5/core/transobj.c
drivers/net/ethernet/mellanox/mlx5/core/transobj.h
drivers/net/ethernet/mellanox/mlx5/core/uar.c
drivers/net/ethernet/mellanox/mlx5/core/wq.c
drivers/net/ethernet/mellanox/mlx5/core/wq.h
drivers/net/ethernet/mellanox/mlxsw/Kconfig [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/Makefile [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/cmd.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/core.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/core.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/emad.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/item.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/pci.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/pci.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/port.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/reg.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/switchx2.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/trap.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/txheader.h [new file with mode: 0644]
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/neterion/s2io.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/renesas/ravb.h
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/rocker/rocker.c
drivers/net/ethernet/rocker/rocker.h
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/mcdi.c
drivers/net/ethernet/sfc/mcdi.h
drivers/net/ethernet/sfc/mcdi_pcol.h
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/nic.h
drivers/net/ethernet/sfc/selftest.c
drivers/net/ethernet/sfc/siena.c
drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/synopsys/Kconfig [new file with mode: 0644]
drivers/net/ethernet/synopsys/Makefile [new file with mode: 0644]
drivers/net/ethernet/synopsys/dwc_eth_qos.c [new file with mode: 0644]
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/netcp.h
drivers/net/ethernet/ti/netcp_core.c
drivers/net/ethernet/ti/netcp_ethss.c
drivers/net/ethernet/ti/netcp_sgmii.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/at86rf230.c
drivers/net/ieee802154/cc2520.c
drivers/net/ieee802154/mrf24j40.c
drivers/net/ifb.c
drivers/net/ipvlan/ipvlan.h
drivers/net/ipvlan/ipvlan_core.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/phy/Kconfig
drivers/net/phy/Makefile
drivers/net/phy/aquantia.c [new file with mode: 0644]
drivers/net/phy/dp83640.c
drivers/net/phy/dp83867.c
drivers/net/phy/fixed_phy.c
drivers/net/phy/marvell.c
drivers/net/phy/mdio-octeon.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/phy.c
drivers/net/phy/spi_ks8995.c
drivers/net/phy/teranetics.c [new file with mode: 0644]
drivers/net/tun.c
drivers/net/usb/Kconfig
drivers/net/usb/Makefile
drivers/net/usb/lan78xx.c [new file with mode: 0644]
drivers/net/usb/lan78xx.h [new file with mode: 0644]
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wireless/ath/ath10k/Makefile
drivers/net/wireless/ath/ath10k/bmi.h
drivers/net/wireless/ath/ath10k/ce.c
drivers/net/wireless/ath/ath10k/ce.h
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/core.h
drivers/net/wireless/ath/ath10k/debug.c
drivers/net/wireless/ath/ath10k/htt.c
drivers/net/wireless/ath/ath10k/htt.h
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/htt_tx.c
drivers/net/wireless/ath/ath10k/hw.c
drivers/net/wireless/ath/ath10k/hw.h
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/mac.h
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/ath/ath10k/pci.h
drivers/net/wireless/ath/ath10k/rx_desc.h
drivers/net/wireless/ath/ath10k/swap.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/swap.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/targaddrs.h
drivers/net/wireless/ath/ath10k/txrx.c
drivers/net/wireless/ath/ath10k/wmi-ops.h
drivers/net/wireless/ath/ath10k/wmi-tlv.c
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/ath/ath10k/wow.c
drivers/net/wireless/ath/ath6kl/htc.h
drivers/net/wireless/ath/ath9k/ar9003_phy.h
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/channel.c
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/debug.h
drivers/net/wireless/ath/ath9k/debug_sta.c
drivers/net/wireless/ath/ath9k/dfs.c
drivers/net/wireless/ath/ath9k/htc_drv_init.c
drivers/net/wireless/ath/ath9k/htc_hst.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/link.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/wmi.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/debug.c
drivers/net/wireless/ath/dfs_pri_detector.c
drivers/net/wireless/ath/wil6210/Makefile
drivers/net/wireless/ath/wil6210/boot_loader.h [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/ath/wil6210/debugfs.c
drivers/net/wireless/ath/wil6210/ethtool.c
drivers/net/wireless/ath/wil6210/fw.c
drivers/net/wireless/ath/wil6210/fw_inc.c
drivers/net/wireless/ath/wil6210/interrupt.c
drivers/net/wireless/ath/wil6210/ioctl.c
drivers/net/wireless/ath/wil6210/main.c
drivers/net/wireless/ath/wil6210/netdev.c
drivers/net/wireless/ath/wil6210/pcie_bus.c
drivers/net/wireless/ath/wil6210/pm.c [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/rx_reorder.c
drivers/net/wireless/ath/wil6210/txrx.c
drivers/net/wireless/ath/wil6210/txrx.h
drivers/net/wireless/ath/wil6210/wil6210.h
drivers/net/wireless/ath/wil6210/wil_platform.c
drivers/net/wireless/ath/wil6210/wmi.c
drivers/net/wireless/b43/lo.c
drivers/net/wireless/b43/lo.h
drivers/net/wireless/b43/phy_g.c
drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/brcm80211/brcmfmac/core.h
drivers/net/wireless/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/cw1200/cw1200_spi.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/iwlegacy/3945-mac.c
drivers/net/wireless/iwlegacy/debug.c
drivers/net/wireless/iwlwifi/dvm/agn.h
drivers/net/wireless/iwlwifi/dvm/dev.h
drivers/net/wireless/iwlwifi/dvm/lib.c
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/dvm/rs.c
drivers/net/wireless/iwlwifi/dvm/rx.c
drivers/net/wireless/iwlwifi/dvm/rxon.c
drivers/net/wireless/iwlwifi/dvm/scan.c
drivers/net/wireless/iwlwifi/dvm/sta.c
drivers/net/wireless/iwlwifi/dvm/tx.c
drivers/net/wireless/iwlwifi/dvm/ucode.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-8000.c
drivers/net/wireless/iwlwifi/iwl-config.h
drivers/net/wireless/iwlwifi/iwl-csr.h
drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
drivers/net/wireless/iwlwifi/iwl-fh.h
drivers/net/wireless/iwlwifi/iwl-fw-file.h
drivers/net/wireless/iwlwifi/iwl-notif-wait.c
drivers/net/wireless/iwlwifi/iwl-notif-wait.h
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/iwlwifi/iwl-op-mode.h
drivers/net/wireless/iwlwifi/iwl-prph.h
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/Makefile
drivers/net/wireless/iwlwifi/mvm/coex.c
drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
drivers/net/wireless/iwlwifi/mvm/constants.h
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/nvm.c
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/power.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/rs.h
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/sta.h
drivers/net/wireless/iwlwifi/mvm/tdls.c
drivers/net/wireless/iwlwifi/mvm/time-event.c
drivers/net/wireless/iwlwifi/mvm/time-event.h
drivers/net/wireless/iwlwifi/mvm/tof.c [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/tof.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/tt.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/mediatek/mt7601u/dma.c
drivers/net/wireless/mediatek/mt7601u/init.c
drivers/net/wireless/mediatek/mt7601u/mac.c
drivers/net/wireless/mediatek/mt7601u/mt7601u.h
drivers/net/wireless/mediatek/mt7601u/tx.c
drivers/net/wireless/mediatek/mt7601u/usb.c
drivers/net/wireless/mediatek/mt7601u/usb.h
drivers/net/wireless/mwifiex/Kconfig
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/mwifiex/decl.h
drivers/net/wireless/mwifiex/fw.h
drivers/net/wireless/mwifiex/ie.c
drivers/net/wireless/mwifiex/init.c
drivers/net/wireless/mwifiex/join.c
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/mwifiex/pcie.h
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/mwifiex/sdio.h
drivers/net/wireless/mwifiex/sta_cmd.c
drivers/net/wireless/mwifiex/sta_cmdresp.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/sta_ioctl.c
drivers/net/wireless/mwifiex/tdls.c
drivers/net/wireless/mwifiex/txrx.c
drivers/net/wireless/mwifiex/uap_cmd.c
drivers/net/wireless/mwifiex/uap_event.c
drivers/net/wireless/mwifiex/usb.c
drivers/net/wireless/mwifiex/usb.h
drivers/net/wireless/mwifiex/util.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/mwifiex/wmm.h
drivers/net/wireless/orinoco/main.c
drivers/net/wireless/orinoco/orinoco_cs.c
drivers/net/wireless/orinoco/orinoco_nortel.c
drivers/net/wireless/orinoco/orinoco_pci.c
drivers/net/wireless/orinoco/orinoco_plx.c
drivers/net/wireless/orinoco/orinoco_usb.c
drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
drivers/net/wireless/rtlwifi/rtl8188ee/fw.h
drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
drivers/net/wireless/rtlwifi/rtl8192cu/def.h
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
drivers/net/wireless/rtlwifi/rtl8192de/dm.c
drivers/net/wireless/rtlwifi/rtl8192de/fw.h
drivers/net/wireless/rtlwifi/rtl8192de/phy.c
drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
drivers/net/wireless/rtlwifi/rtl8192ee/fw.h
drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
drivers/net/wireless/rtlwifi/rtl8723be/sw.c
drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h
drivers/net/wireless/rtlwifi/rtl8821ae/fw.c
drivers/net/wireless/rtlwifi/rtl8821ae/fw.h
drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
drivers/net/wireless/rtlwifi/wifi.h
drivers/net/wireless/ti/wl12xx/scan.c
drivers/net/wireless/ti/wl18xx/acx.c
drivers/net/wireless/ti/wl18xx/acx.h
drivers/net/wireless/ti/wl18xx/debugfs.c
drivers/net/wireless/ti/wl18xx/event.c
drivers/net/wireless/ti/wl18xx/event.h
drivers/net/wireless/ti/wl18xx/main.c
drivers/net/wireless/ti/wl18xx/scan.c
drivers/net/wireless/ti/wl18xx/scan.h
drivers/net/wireless/ti/wlcore/cmd.c
drivers/net/wireless/ti/wlcore/cmd.h
drivers/net/wireless/ti/wlcore/conf.h
drivers/net/wireless/ti/wlcore/init.c
drivers/net/wireless/ti/wlcore/init.h
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/ti/wlcore/rx.c
drivers/net/wireless/ti/wlcore/rx.h
drivers/net/wireless/ti/wlcore/scan.h
drivers/net/wireless/ti/wlcore/sdio.c
drivers/net/wireless/ti/wlcore/wlcore.h
drivers/net/wireless/ti/wlcore/wlcore_i.h
drivers/net/xen-netback/common.h
drivers/net/xen-netback/netback.c
drivers/nvdimm/region_devs.c
drivers/of/Kconfig
drivers/of/of_mdio.c
drivers/of/unittest.c
drivers/parport/share.c
drivers/phy/Kconfig
drivers/phy/phy-berlin-usb.c
drivers/phy/phy-ti-pipe3.c
drivers/pinctrl/bcm/pinctrl-bcm2835.c
drivers/pinctrl/freescale/pinctrl-imx1-core.c
drivers/pinctrl/nomadik/pinctrl-abx500.c
drivers/pinctrl/pinctrl-lpc18xx.c
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/samsung/pinctrl-samsung.c
drivers/pinctrl/sh-pfc/sh_pfc.h
drivers/pinctrl/spear/pinctrl-spear.c
drivers/pinctrl/spear/pinctrl-spear.h
drivers/pinctrl/spear/pinctrl-spear1310.c
drivers/pinctrl/spear/pinctrl-spear1340.c
drivers/pinctrl/spear/pinctrl-spear300.c
drivers/pinctrl/spear/pinctrl-spear310.c
drivers/pinctrl/spear/pinctrl-spear320.c
drivers/pinctrl/spear/pinctrl-spear3xx.c
drivers/pinctrl/spear/pinctrl-spear3xx.h
drivers/platform/x86/dell-laptop.c
drivers/platform/x86/intel_pmc_ipc.c
drivers/platform/x86/intel_scu_ipc.c
drivers/regulator/88pm800.c
drivers/regulator/core.c
drivers/regulator/max8973-regulator.c
drivers/regulator/s2mps11.c
drivers/rtc/rtc-armada38x.c
drivers/rtc/rtc-mt6397.c
drivers/s390/Makefile
drivers/s390/block/dasd.c
drivers/s390/block/dasd_alias.c
drivers/s390/char/sclp_early.c
drivers/s390/crypto/zcrypt_api.c
drivers/s390/virtio/Makefile [moved from drivers/s390/kvm/Makefile with 100% similarity]
drivers/s390/virtio/kvm_virtio.c [moved from drivers/s390/kvm/kvm_virtio.c with 100% similarity]
drivers/s390/virtio/virtio_ccw.c [moved from drivers/s390/kvm/virtio_ccw.c with 100% similarity]
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_sup.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_srp.c
drivers/scsi/st.c
drivers/scsi/virtio_scsi.c
drivers/spi/Kconfig
drivers/spi/spi-img-spfi.c
drivers/spi/spi-imx.c
drivers/spi/spi-zynqmp-gqspi.c
drivers/spi/spidev.c
drivers/staging/board/Kconfig
drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
drivers/staging/vt6655/device_main.c
drivers/staging/vt6656/main_usb.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_login.h
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/target_core_configfs.c
drivers/target/target_core_pr.c
drivers/target/target_core_rd.c
drivers/target/target_core_spc.c
drivers/tty/n_tty.c
drivers/tty/serial/Kconfig
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/etraxfs-uart.c
drivers/tty/serial/imx.c
drivers/tty/serial/sc16is7xx.c
drivers/tty/serial/serial_core.c
drivers/tty/vt/selection.c
drivers/tty/vt/vt.c
drivers/usb/class/cdc-acm.c
drivers/usb/common/ulpi.c
drivers/usb/core/hcd.c
drivers/usb/core/hub.c
drivers/usb/core/usb.h
drivers/usb/dwc2/core.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/hcd.c
drivers/usb/dwc2/hcd.h
drivers/usb/dwc2/hcd_queue.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/ep0.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_mass_storage.c
drivers/usb/gadget/function/f_midi.c
drivers/usb/gadget/udc/fotg210-udc.c
drivers/usb/gadget/udc/mv_udc_core.c
drivers/usb/gadget/udc/udc-core.c
drivers/usb/host/ohci-q.c
drivers/usb/host/ohci-tmio.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/musb_virthub.c
drivers/usb/phy/phy-mxs-usb.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/mos7720.c
drivers/usb/serial/option.c
drivers/usb/serial/usb-serial.c
drivers/usb/storage/unusual_devs.h
drivers/vfio/vfio.c
drivers/vhost/vhost.c
drivers/watchdog/sp805_wdt.c
fs/btrfs/dev-replace.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/transaction.c
fs/configfs/item.c
fs/dax.c
fs/f2fs/data.c
fs/f2fs/file.c
fs/f2fs/gc.c
fs/f2fs/inline.c
fs/f2fs/segment.c
fs/fs-writeback.c
fs/jfs/file.c
fs/jfs/inode.c
fs/jfs/namei.c
fs/locks.c
fs/namespace.c
fs/nfs/client.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs42proc.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/pagelist.c
fs/nfs/pnfs.c
fs/nfs/write.c
fs/pnode.h
fs/proc/Kconfig
fs/proc/base.c
fs/proc/kcore.c
fs/udf/inode.c
fs/xfs/libxfs/xfs_attr_remote.c
fs/xfs/xfs_file.c
fs/xfs/xfs_log_recover.c
include/asm-generic/mm-arch-hooks.h [new file with mode: 0644]
include/linux/amba/sp810.h
include/linux/ata.h
include/linux/blk-cgroup.h
include/linux/bpf.h
include/linux/clkdev.h
include/linux/compat.h
include/linux/configfs.h
include/linux/cper.h
include/linux/cpu.h
include/linux/cpufreq.h
include/linux/dcache.h
include/linux/device.h
include/linux/filter.h
include/linux/fs.h
include/linux/ftrace.h
include/linux/gpio/driver.h
include/linux/hid-sensor-hub.h
include/linux/hugetlb.h
include/linux/init.h
include/linux/iommu.h
include/linux/ipv6.h
include/linux/kernel.h
include/linux/kobject.h
include/linux/kvm_host.h
include/linux/libata.h
include/linux/mlx4/cq.h
include/linux/mlx4/device.h
include/linux/mlx4/qp.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mmiotrace.h
include/linux/module.h
include/linux/mpls_iptunnel.h [new file with mode: 0644]
include/linux/mtd/nand.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/netfilter/x_tables.h
include/linux/netfilter_bridge.h
include/linux/nfs_fs.h
include/linux/nfs_fs_sb.h
include/linux/of_device.h
include/linux/page_owner.h
include/linux/pata_arasan_cf_data.h
include/linux/phy.h
include/linux/platform_data/macb.h
include/linux/platform_data/mmc-esdhc-imx.h
include/linux/printk.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/stmmac.h
include/net/act_api.h
include/net/addrconf.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/l2cap.h
include/net/bond_options.h
include/net/cfg802154.h
include/net/cls_cgroup.h
include/net/dst.h
include/net/dst_metadata.h [new file with mode: 0644]
include/net/fib_rules.h
include/net/flow.h
include/net/inet_frag.h
include/net/inet_hashtables.h
include/net/inet_timewait_sock.h
include/net/ip.h
include/net/ip6_fib.h
include/net/ip_fib.h
include/net/ip_tunnels.h
include/net/ipv6.h
include/net/lwtunnel.h [new file with mode: 0644]
include/net/mac802154.h
include/net/mpls_iptunnel.h [new file with mode: 0644]
include/net/netfilter/nf_conntrack.h
include/net/netns/conntrack.h
include/net/netns/ipv6.h
include/net/netns/netfilter.h
include/net/route.h
include/net/rtnetlink.h
include/net/sch_generic.h
include/net/sock.h
include/net/switchdev.h
include/net/tc_act/tc_gact.h
include/net/tc_act/tc_mirred.h
include/net/tcp.h
include/net/timewait_sock.h
include/net/vxlan.h
include/rdma/ib_verbs.h
include/scsi/scsi_transport_srp.h
include/target/iscsi/iscsi_target_core.h
include/uapi/drm/amdgpu_drm.h
include/uapi/drm/i915_drm.h
include/uapi/drm/radeon_drm.h
include/uapi/linux/Kbuild
include/uapi/linux/bpf.h
include/uapi/linux/ethtool.h
include/uapi/linux/fib_rules.h
include/uapi/linux/if_bridge.h
include/uapi/linux/if_link.h
include/uapi/linux/ipv6.h
include/uapi/linux/lwtunnel.h [new file with mode: 0644]
include/uapi/linux/mpls.h
include/uapi/linux/mpls_iptunnel.h [new file with mode: 0644]
include/uapi/linux/netfilter/nf_conntrack_sctp.h
include/uapi/linux/netfilter/nfnetlink_cttimeout.h
include/uapi/linux/openvswitch.h
include/uapi/linux/rtnetlink.h
include/uapi/linux/snmp.h
include/uapi/linux/virtio_net.h
include/uapi/linux/virtio_pci.h
include/uapi/linux/virtio_ring.h
include/uapi/sound/asoc.h
kernel/bpf/core.c
kernel/bpf/verifier.c
kernel/cpu.c
kernel/fork.c
kernel/irq/resend.c
kernel/resource.c
kernel/sched/fair.c
kernel/time/tick-broadcast.c
kernel/time/tick-common.c
kernel/trace/ftrace.c
kernel/trace/trace.h
kernel/trace/trace_branch.c
lib/decompress.c
lib/dma-debug.c
lib/hexdump.c
lib/kobject.c
lib/test_bpf.c
lib/test_rhashtable.c
mm/cma_debug.c
mm/page_alloc.c
mm/page_owner.c
net/6lowpan/iphc.c
net/9p/trans_virtio.c
net/Kconfig
net/atm/br2684.c
net/ax25/ax25_subr.c
net/bluetooth/6lowpan.c
net/bluetooth/Kconfig
net/bluetooth/Makefile
net/bluetooth/a2mp.c
net/bluetooth/a2mp.h
net/bluetooth/amp.c
net/bluetooth/amp.h
net/bluetooth/cmtp/capi.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/l2cap_sock.c
net/bluetooth/mgmt.c
net/bluetooth/smp.c
net/bridge/br_device.c
net/bridge/br_forward.c
net/bridge/br_if.c
net/bridge/br_mdb.c
net/bridge/br_multicast.c
net/bridge/br_netfilter_hooks.c
net/bridge/br_netfilter_ipv6.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/br_stp.c
net/bridge/br_stp_if.c
net/bridge/br_stp_timer.c
net/caif/caif_socket.c
net/core/Makefile
net/core/datagram.c
net/core/dev.c
net/core/dst.c
net/core/fib_rules.c
net/core/filter.c
net/core/flow_dissector.c
net/core/lwtunnel.c [new file with mode: 0644]
net/core/net-sysfs.c
net/core/netclassid_cgroup.c
net/core/pktgen.c
net/core/rtnetlink.c
net/core/sock.c
net/core/timestamping.c
net/dccp/proto.c
net/dsa/dsa_priv.h
net/dsa/slave.c
net/dsa/tag_brcm.c
net/dsa/tag_dsa.c
net/dsa/tag_edsa.c
net/dsa/tag_trailer.c
net/ieee802154/6lowpan/reassembly.c
net/ieee802154/rdev-ops.h
net/ieee802154/sysfs.c
net/ieee802154/trace.h
net/ipv4/af_inet.c
net/ipv4/arp.c
net/ipv4/datagram.c
net/ipv4/devinet.c
net/ipv4/fib_frontend.c
net/ipv4/fib_lookup.h
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/icmp.c
net/ipv4/inet_fragment.c
net/ipv4/inet_hashtables.c
net/ipv4/inet_timewait_sock.c
net/ipv4/ip_fragment.c
net/ipv4/ip_input.c
net/ipv4/ip_tunnel_core.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/ping.c
net/ipv4/proc.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_bic.c
net/ipv4/tcp_cdg.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_cubic.c
net/ipv4/tcp_highspeed.c
net/ipv4/tcp_htcp.c
net/ipv4/tcp_hybla.c
net/ipv4/tcp_illinois.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_scalable.c
net/ipv4/tcp_timer.c
net/ipv4/tcp_vegas.c
net/ipv4/tcp_veno.c
net/ipv6/Kconfig
net/ipv6/addrconf.c
net/ipv6/addrconf_core.c
net/ipv6/af_inet6.c
net/ipv6/datagram.c
net/ipv6/exthdrs.c
net/ipv6/icmp.c
net/ipv6/inet6_hashtables.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_input.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ndisc.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_REJECT.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
net/ipv6/raw.c
net/ipv6/reassembly.c
net/ipv6/route.c
net/ipv6/sysctl_net_ipv6.c
net/ipv6/tcp_ipv6.c
net/llc/af_llc.c
net/mac802154/cfg.c
net/mac802154/ieee802154_i.h
net/mac802154/iface.c
net/mac802154/main.c
net/mac802154/rx.c
net/mac802154/tx.c
net/mac802154/util.c
net/mpls/Kconfig
net/mpls/Makefile
net/mpls/af_mpls.c
net/mpls/internal.h
net/mpls/mpls_iptunnel.c [new file with mode: 0644]
net/netfilter/core.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_sched.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_internals.h
net/netfilter/nf_queue.c
net/netfilter/nf_synproxy_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_core.c
net/netfilter/nft_meta.c
net/netfilter/x_tables.c
net/netfilter/xt_CT.c
net/netfilter/xt_IDLETIMER.c
net/netfilter/xt_TEE.c
net/netfilter/xt_TPROXY.c
net/netlink/af_netlink.c
net/openvswitch/Makefile
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/openvswitch/datapath.h
net/openvswitch/dp_notify.c
net/openvswitch/flow.c
net/openvswitch/flow.h
net/openvswitch/flow_netlink.c
net/openvswitch/flow_netlink.h
net/openvswitch/flow_table.c
net/openvswitch/vport-geneve.c
net/openvswitch/vport-gre.c
net/openvswitch/vport-internal_dev.c
net/openvswitch/vport-netdev.c
net/openvswitch/vport-netdev.h
net/openvswitch/vport-vxlan.c
net/openvswitch/vport-vxlan.h [deleted file]
net/openvswitch/vport.c
net/openvswitch/vport.h
net/packet/af_packet.c
net/rds/ib_rdma.c
net/sched/act_api.c
net/sched/act_bpf.c
net/sched/act_connmark.c
net/sched/act_csum.c
net/sched/act_gact.c
net/sched/act_ipt.c
net/sched/act_mirred.c
net/sched/act_nat.c
net/sched/act_pedit.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sched/act_vlan.c
net/sched/cls_bpf.c
net/sched/cls_cgroup.c
net/sched/cls_flow.c
net/sched/cls_flower.c
net/sched/sch_choke.c
net/sched/sch_fq_codel.c
net/sched/sch_plug.c
net/sched/sch_qfq.c
net/sched/sch_sfq.c
net/sctp/protocol.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
net/sunrpc/backchannel_rqst.c
net/sunrpc/clnt.c
net/sunrpc/xprtsock.c
net/switchdev/switchdev.c
net/tipc/bcast.c
net/tipc/bcast.h
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/core.h
net/tipc/discover.c
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/msg.h
net/tipc/name_distr.c
net/tipc/node.c
net/tipc/node.h
net/tipc/socket.c
net/tipc/socket.h
net/tipc/udp_media.c
net/xfrm/xfrm_user.c
samples/bpf/bpf_helpers.h
samples/bpf/test_verifier.c
samples/bpf/tracex1_kern.c
samples/bpf/tracex2_kern.c
samples/bpf/tracex3_kern.c
samples/bpf/tracex4_kern.c
samples/bpf/tracex5_kern.c
samples/trace_events/trace-events-sample.h
scripts/checkpatch.pl
security/keys/keyring.c
sound/core/pcm_native.c
sound/firewire/fireworks/fireworks.c
sound/firewire/fireworks/fireworks.h
sound/firewire/fireworks/fireworks_stream.c
sound/hda/hdac_i915.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/soc/codecs/pcm1681.c
sound/soc/codecs/rt5645.c
sound/soc/codecs/sgtl5000.h
sound/soc/codecs/ssm4567.c
sound/soc/fsl/fsl_ssi.c
sound/soc/intel/Makefile
sound/soc/intel/atom/sst/sst_drv_interface.c
sound/soc/intel/boards/cht_bsw_max98090_ti.c
sound/soc/mediatek/mt8173-max98090.c
sound/soc/mediatek/mt8173-rt5650-rt5676.c
sound/soc/mediatek/mtk-afe-pcm.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/soc/soc-topology.c
sound/soc/zte/zx296702-i2s.c
sound/soc/zte/zx296702-spdif.c
sound/sparc/amd7930.c
sound/usb/line6/pcm.c
sound/usb/mixer_maps.c
sound/usb/quirks-table.h
tools/lib/api/Makefile
tools/lib/hweight.c [new file with mode: 0644]
tools/lib/traceevent/Makefile
tools/net/bpf_jit_disasm.c
tools/perf/MANIFEST
tools/perf/Makefile.perf
tools/perf/builtin-stat.c
tools/perf/ui/browsers/hists.c
tools/perf/util/Build
tools/perf/util/auxtrace.c
tools/perf/util/python-ext-sources
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/thread_map.c
tools/perf/util/vdso.c
tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
virt/kvm/vfio.c

index 977f958eedbe6a6e56f63033b17c3cb66cd8b8a3..b4091b7a78fe11ccd0e5f44f0703ace69dc09707 100644
--- a/.mailmap
+++ b/.mailmap
@@ -116,6 +116,7 @@ Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
 Simon Kelley <simon@thekelleys.org.uk>
 Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
 Stephen Hemminger <shemminger@osdl.org>
+Sudeep Holla <sudeep.holla@arm.com> Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
 Sumit Semwal <sumit.semwal@ti.com>
 Tejun Heo <htejun@gmail.com>
 Thomas Graf <tgraf@suug.ch>
@@ -125,7 +126,9 @@ Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
 Uwe Kleine-König <ukl@pengutronix.de>
 Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
 Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
-Viresh Kumar <viresh.linux@gmail.com> <viresh.kumar@st.com>
+Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
+Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
+Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
index bbed111c31b4ed7658e61ac6b7140284c3035dc6..70c9b1ac66dbc5880c90312b47ba0b28669a77a6 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -1234,10 +1234,8 @@ Description:
                object is near the sensor, usually be observing
                reflectivity of infrared or ultrasound emitted.
                Often these sensors are unit less and as such conversion
-               to SI units is not possible.  Where it is, the units should
-               be meters.  If such a conversion is not possible, the reported
-               values should behave in the same way as a distance, i.e. lower
-               values indicate something is closer to the sensor.
+               to SI units is not possible. Higher proximity measurements
+               indicate closer objects, and vice versa.
 
 What:          /sys/.../iio:deviceX/in_illuminance_input
 What:          /sys/.../iio:deviceX/in_illuminance_raw
index c0312cbd023d249e0bb34ab1b2970b5036f3ef30..2fb9a5457522a48cf590f88d14463dfafefb668a 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -3383,7 +3383,7 @@ void intel_crt_init(struct drm_device *dev)
        <td valign="top" >TBD</td>
        </tr>
        <tr>
-       <td rowspan="2" valign="top" >omap</td>
+       <td valign="top" >omap</td>
        <td valign="top" >Generic</td>
        <td valign="top" >“zorder”</td>
        <td valign="top" >RANGE</td>
index 65610bf52ebffbad7e13d8f522df603487ef59e2..1b049be6c84f0e4bddfb2dbf212edf71ce91be4e 100644
--- a/Documentation/arm/SPEAr/overview.txt
+++ b/Documentation/arm/SPEAr/overview.txt
@@ -60,4 +60,4 @@ Introduction
   Document Author
   ---------------
 
-  Viresh Kumar <viresh.linux@gmail.com>, (c) 2010-2012 ST Microelectronics
+  Viresh Kumar <vireshk@kernel.org>, (c) 2010-2012 ST Microelectronics
index 82960cffbad346d73a8e47ebd188a54a9db4e4c1..785eab87aa71dc68aceeb970676a614c9c2c6381 100644
--- a/Documentation/device-mapper/cache.txt
+++ b/Documentation/device-mapper/cache.txt
@@ -258,6 +258,12 @@ cache metadata mode      : ro if read-only, rw if read-write
        no further I/O will be permitted and the status will just
        contain the string 'Fail'.  The userspace recovery tools
        should then be used.
+needs_check             : 'needs_check' if set, '-' if not set
+       A metadata operation has failed, resulting in the needs_check
+       flag being set in the metadata's superblock.  The metadata
+       device must be deactivated and checked/repaired before the
+       cache can be made fully operational again.  '-' indicates
+       needs_check is not set.
 
 Messages
 --------
index 4f67578b295483bcc14d48f069c6ded3581e3f94..1699a55b7b709adddd18b97cd769cb8946662e48 100644 (file)
@@ -296,7 +296,7 @@ ii) Status
        underlying device.  If this is enabled when the table is loaded,
        it can get disabled if the underlying device doesn't support it.
 
-    ro|rw
+    ro|rw|out_of_data_space
        If the pool encounters certain types of device failures it will
        drop into a read-only metadata mode in which no changes to
        the pool metadata (like allocating new blocks) are permitted.
@@ -314,6 +314,13 @@ ii) Status
        module parameter can be used to change this timeout -- it
        defaults to 60 seconds but may be disabled using a value of 0.
 
+    needs_check
+       A metadata operation has failed, resulting in the needs_check
+       flag being set in the metadata's superblock.  The metadata
+       device must be deactivated and checked/repaired before the
+       thin-pool can be made fully operational again.  '-' indicates
+       needs_check is not set.
+
 iii) Messages
 
     create_thin <dev id>
index e75f0e549fff7cfe767786a83c57da5b5a4fe5b1..971c3eedb1c7b10de4d65134f8db5880fffd3462 100644 (file)
@@ -65,8 +65,10 @@ Optional properties:
 - edid: verbatim EDID data block describing attached display.
 - ddc: phandle describing the i2c bus handling the display data
   channel
-- port: A port node with endpoint definitions as defined in
+- port@[0-1]: Port nodes with endpoint definitions as defined in
   Documentation/devicetree/bindings/media/video-interfaces.txt.
+  Port 0 is the input port connected to the IPU display interface,
+  port 1 is the output port connected to a panel.
 
 example:
 
@@ -75,9 +77,29 @@ display@di0 {
        edid = [edid-data];
        interface-pix-fmt = "rgb24";
 
-       port {
+       port@0 {
+               reg = <0>;
+
                display_in: endpoint {
                        remote-endpoint = <&ipu_di0_disp0>;
                };
        };
+
+       port@1 {
+               reg = <1>;
+
+               display_out: endpoint {
+                       remote-endpoint = <&panel_in>;
+               };
+       };
+};
+
+panel {
+       ...
+
+       port {
+               panel_in: endpoint {
+                       remote-endpoint = <&display_out>;
+               };
+       };
 };
index c03eec1168721bdb1c851f7c42eba34ec48f4ba7..3443e0f838dfc8a53e548527a05cf9892f2c2a92 100644 (file)
@@ -35,3 +35,6 @@ the PCIe specification.
 
                       NOTE: this only applies to the SMMU itself, not
                       masters connected upstream of the SMMU.
+
+- hisilicon,broken-prefetch-cmd
+                    : Avoid sending CMD_PREFETCH_* commands to the SMMU.
index 5d0376b8f2026ed57daabd33d684590d02ff5920..211e7785f4d240ec2ffc7258839f740e8b614b78 100644 (file)
@@ -17,7 +17,6 @@ Required properties:
               "fsl,imx6sx-usdhc"
 
 Optional properties:
-- fsl,cd-controller : Indicate to use controller internal card detection
 - fsl,wp-controller : Indicate to use controller internal write protection
 - fsl,delay-line : Specify the number of delay cells for override mode.
   This is used to set the clock delay for DLL(Delay Line) on override mode
@@ -35,7 +34,6 @@ esdhc@70004000 {
        compatible = "fsl,imx51-esdhc";
        reg = <0x70004000 0x4000>;
        interrupts = <1>;
-       fsl,cd-controller;
        fsl,wp-controller;
 };
 
index 41b3f3f864e84d7f6941bb61280870d70ea687b7..5d88f37480b6a75e13f61e782829bf9babbf46a9 100644 (file)
@@ -25,7 +25,11 @@ The following properties are common to the Ethernet controllers:
   flow control thresholds.
 - tx-fifo-depth: the size of the controller's transmit fifo in bytes. This
   is used for components that can have configurable fifo sizes.
+- managed: string, specifies the PHY management type. Supported values are:
+  "auto", "in-band-status". "auto" is the default, it usess MDIO for
+  management if fixed-link is not specified.
 
 Child nodes of the Ethernet controller are typically the individual PHY devices
 connected via the MDIO bus (sometimes the MDIO bus controller is separate).
 They are described in the phy.txt file in this same directory.
+For non-MDIO PHY management see fixed-link.txt.
index d0e6fa38f335fcfa10f0840221bc663e4a5c9f31..b30ab6b5cbfa9f0f3666165c1fb9641a8d375fb5 100644 (file)
@@ -130,7 +130,11 @@ Required properties:
 
 Optional properties:
 - efuse-mac:   If this is 1, then the MAC address for the interface is
-               obtained from the device efuse mac address register
+               obtained from the device efuse mac address register.
+               If this is 2, the two DWORDs occupied by the MAC address
+               are stored swapped.  The netcp driver will swap the two
+               DWORDs back to the proper order when it obtains the MAC
+               address from efuse.
 - local-mac-address:   the driver is designed to use the of_get_mac_address api
                        only if efuse-mac is 0. When efuse-mac is 0, the MAC
                        address is obtained from local-mac-address. If this
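
A plain-C model of the efuse-mac = <2> word swap described above (the
function name is illustrative, not taken from the netcp driver):

/* Model of the efuse-mac = <2> handling: the MAC address is stored as
 * two swapped 32-bit words, so exchange them back into proper order.
 * Assumes kernel u32 from <linux/types.h>. */
#include <linux/types.h>

static void example_swap_efuse_mac(u32 *efuse)
{
	u32 tmp = efuse[0];

	efuse[0] = efuse[1];
	efuse[1] = tmp;
}
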
diff --git a/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt b/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
new file mode 100644 (file)
index 0000000..51f8d2e
--- /dev/null
@@ -0,0 +1,75 @@
+* Synopsys DWC Ethernet QoS IP version 4.10 driver (GMAC)
+
+
+Required properties:
+- compatible: Should be "snps,dwc-qos-ethernet-4.10"
+- reg: Address and length of the register set for the device
+- clocks: Phandles to the reference clock and the bus clock
+- clock-names: Should be "phy_ref_clk" for the reference clock and "apb_pclk"
+  for the bus clock.
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupts: Should contain the core's combined interrupt signal
+- phy-mode: See ethernet.txt file in the same directory
+
+Optional properties:
+- dma-coherent: Present if dma operations are coherent
+- mac-address: See ethernet.txt in the same directory
+- local-mac-address: See ethernet.txt in the same directory
+- snps,en-lpi: If present it enables use of the AXI low-power interface
+- snps,write-requests: Number of write requests that the AXI port can issue.
+  It depends on the SoC configuration.
+- snps,read-requests: Number of read requests that the AXI port can issue.
+  It depends on the SoC configuration.
+- snps,burst-map: Bitmap of allowed AXI burst lengths, with the LSB
+  representing 4, then 8 etc.
+- snps,txpbl: DMA Programmable burst length for the TX DMA
+- snps,rxpbl: DMA Programmable burst length for the RX DMA
+- snps,en-tx-lpi-clockgating: Enable gating of the MAC TX clock during
+  TX low-power mode.
+- phy-handle: See ethernet.txt file in the same directory
+- mdio device tree subnode: When the GMAC has a phy connected to its local
+    mdio, there must be a device tree subnode with the following
+    required properties:
+    - compatible: Must be "snps,dwc-qos-ethernet-mdio".
+    - #address-cells: Must be <1>.
+    - #size-cells: Must be <0>.
+
+    For each phy on the mdio bus, there must be a node with the following
+    fields:
+
+    - reg: phy id used to communicate to phy.
+    - device_type: Must be "ethernet-phy".
+    - fixed-link device tree subnode: see fixed-link.txt in the same directory
+
+Examples:
+ethernet2@40010000 {
+       clock-names = "phy_ref_clk", "apb_pclk";
+       clocks = <&clkc 17>, <&clkc 15>;
+       compatible = "snps,dwc-qos-ethernet-4.10";
+       interrupt-parent = <&intc>;
+       interrupts = <0x0 0x1e 0x4>;
+       reg = <0x40010000 0x4000>;
+       phy-handle = <&phy2>;
+       phy-mode = "gmii";
+
+       snps,en-tx-lpi-clockgating;
+       snps,en-lpi;
+       snps,write-requests = <2>;
+       snps,read-requests = <16>;
+       snps,burst-map = <0x7>;
+       snps,txpbl = <8>;
+       snps,rxpbl = <2>;
+
+       dma-coherent;
+
+       mdio {
+               #address-cells = <0x1>;
+               #size-cells = <0x0>;
+               phy2: phy@1 {
+                       compatible = "ethernet-phy-ieee802.3-c22";
+                       device_type = "ethernet-phy";
+                       reg = <0x1>;
+               };
+       };
+};
index 829bd26d17f86e753ca54f2a4750fd86db61a641..519e97c8f1b8c2029c30785cc39a97ae317e9e64 100644 (file)
@@ -3,11 +3,13 @@ MT8173 with MAX98090 CODEC
 Required properties:
 - compatible : "mediatek,mt8173-max98090"
 - mediatek,audio-codec: the phandle of the MAX98090 audio codec
+- mediatek,platform: the phandle of MT8173 ASoC platform
 
 Example:
 
        sound {
                compatible = "mediatek,mt8173-max98090";
                mediatek,audio-codec = <&max98090>;
+               mediatek,platform = <&afe>;
        };
 
index 61e98c976bd4012b99fc218b729e6fc006209b84..f205ce9e31dd5d31e981bac826e1f69e819434ca 100644 (file)
@@ -3,11 +3,13 @@ MT8173 with RT5650 RT5676 CODECS
 Required properties:
 - compatible : "mediatek,mt8173-rt5650-rt5676"
 - mediatek,audio-codec: the phandles of rt5650 and rt5676 codecs
+- mediatek,platform: the phandle of MT8173 ASoC platform
 
 Example:
 
        sound {
                compatible = "mediatek,mt8173-rt5650-rt5676";
                mediatek,audio-codec = <&rt5650 &rt5676>;
+               mediatek,platform = <&afe>;
        };
 
index f1ad9c367532437b370b412cb9f5c68b8f73c494..9c696fa66f818eb98144c628aa1b7000f96a6695 100644 (file)
@@ -3,7 +3,7 @@ Binding for Qualcomm Atheros AR7xxx/AR9xxx SPI controller
 Required properties:
 - compatible: has to be "qca,<soc-type>-spi", "qca,ar7100-spi" as fallback.
 - reg: Base address and size of the controllers memory area
-- clocks: phandle to the AHB clock.
+- clocks: phandle of the AHB clock.
 - clock-names: has to be "ahb".
 - #address-cells: <1>, as required by generic SPI binding.
 - #size-cells: <0>, also as required by generic SPI binding.
@@ -12,9 +12,9 @@ Child nodes as per the generic SPI binding.
 
 Example:
 
-       spi@1F000000 {
+       spi@1f000000 {
                compatible = "qca,ar9132-spi", "qca,ar7100-spi";
-               reg = <0x1F000000 0x10>;
+               reg = <0x1f000000 0x10>;
 
                clocks = <&pll 2>;
                clock-names = "ahb";
index 014f112e2a14e19557878a250e30438fd8f91b73..57fffe33ebfcdef2c9b52831667e147007c9454e 100644 (file)
@@ -35,11 +35,11 @@ temp1_input         Local temperature (1/1000 degree,
 temp[2-9]_input                CPU temperatures (1/1000 degree,
                        0.125 degree resolution)
 
-fan[1-4]_mode          R/W, 0/1 for manual or SmartFan mode
+pwm[1-4]_enable                R/W, 1/2 for manual or SmartFan mode
                        Setting SmartFan mode is supported only if it has been
                        previously configured by BIOS (or configuration EEPROM)
 
-fan[1-4]_pwm           R/O in SmartFan mode, R/W in manual control mode
+pwm[1-4]               R/O in SmartFan mode, R/W in manual control mode
 
 The driver checks sensor control registers and does not export the sensors
 that are not enabled. Anyway, a sensor that is enabled may actually be not
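
A minimal userspace sketch of driving the renamed attributes above (the
hwmon0 path is an assumption; the device may enumerate at another index):

/* Switch fan 1 to manual mode and set a mid-range duty cycle. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/hwmon/hwmon0/pwm1_enable", "w");

	if (!f)
		return 1;
	fputs("1", f);		/* 1 = manual mode, 2 = SmartFan mode */
	fclose(f);

	f = fopen("/sys/class/hwmon/hwmon0/pwm1", "w");
	if (!f)
		return 1;
	fputs("128", f);	/* pwm1 is writable only in manual mode */
	fclose(f);
	return 0;
}
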
index e63b446d973cd716097adb60295577fe558f3f96..13f888a02a3de5cb7ecfdb2ed03eb1d721601de9 100644 (file)
@@ -952,6 +952,14 @@ When kbuild executes, the following steps are followed (roughly):
        $(KBUILD_ARFLAGS) set by the top level Makefile to "D" (deterministic
        mode) if this option is supported by $(AR).
 
+    ARCH_CPPFLAGS, ARCH_AFLAGS, ARCH_CFLAGS   Override the kbuild defaults
+
+       These variables are appended to the KBUILD_CPPFLAGS,
+       KBUILD_AFLAGS, and KBUILD_CFLAGS, respectively, after the
+       top-level Makefile has set any other flags. This provides a
+       means for an architecture to override the defaults.
+
+
 --- 6.2 Add prerequisites to archheaders:
 
        The archheaders: rule is used to generate header files that
index 5fae7704daab292cf900158666c2d4bb80dd2424..56db1efd7189ac6b25fdfc4a18b579f28e01a4b7 100644 (file)
@@ -1215,14 +1215,20 @@ flowlabel_consistency - BOOLEAN
        FALSE: disabled
        Default: TRUE
 
-auto_flowlabels - BOOLEAN
-       Automatically generate flow labels based based on a flow hash
-       of the packet. This allows intermediate devices, such as routers,
-       to idenfify packet flows for mechanisms like Equal Cost Multipath
+auto_flowlabels - INTEGER
+       Automatically generate flow labels based on a flow hash of the
+       packet. This allows intermediate devices, such as routers, to
+       identify packet flows for mechanisms like Equal Cost Multipath
        Routing (see RFC 6438).
-       TRUE: enabled
-       FALSE: disabled
-       Default: false
+       0: automatic flow labels are completely disabled
+       1: automatic flow labels are enabled by default, they can be
+          disabled on a per socket basis using the IPV6_AUTOFLOWLABEL
+          socket option
+       2: automatic flow labels are allowed, they may be enabled on a
+          per socket basis using the IPV6_AUTOFLOWLABEL socket option
+       3: automatic flow labels are enabled and enforced, they cannot
+          be disabled by the socket option
+       Default: 1
 
 flowlabel_state_ranges - BOOLEAN
        Split the flow label number space into two ranges. 0-0x7FFFF is
@@ -1340,6 +1346,14 @@ accept_ra_from_local - BOOLEAN
           disabled if accept_ra_from_local is disabled
                on a specific interface.
 
+accept_ra_min_hop_limit - INTEGER
+       Minimum hop limit value accepted in a Router Advertisement.
+
+       A hop limit value in a Router Advertisement lower than this
+       variable shall be ignored.
+
+       Default: 1
+
 accept_ra_pinfo - BOOLEAN
        Learn Prefix Information in Router Advertisement.
 
@@ -1435,6 +1449,11 @@ mtu - INTEGER
        Default Maximum Transfer Unit
        Default: 1280 (IPv6 required minimum)
 
+ip_nonlocal_bind - BOOLEAN
+       If set, allows processes to bind() to non-local IPv6 addresses,
+       which can be quite useful - but may break some applications.
+       Default: 0
+
 router_probe_interval - INTEGER
        Minimum interval (in seconds) between Router Probing described
        in RFC4191.
@@ -1455,6 +1474,13 @@ router_solicitations - INTEGER
        routers are present.
        Default: 3
 
+use_oif_addrs_only - BOOLEAN
+       When enabled, the candidate source addresses for destinations
+       routed via this interface are restricted to the set of addresses
+       configured on this interface (see RFC 6724, section 4).
+
+       Default: false
+
 use_tempaddr - INTEGER
        Preference for Privacy Extensions (RFC3041).
          <= 0 : disable Privacy Extensions
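
The per-socket override referenced for auto_flowlabels modes 1 and 2
above is the IPV6_AUTOFLOWLABEL socket option; a minimal userspace
sketch (the option value is from linux/in6.h, redefined here in case
older libc headers lack it):

#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPV6_AUTOFLOWLABEL
#define IPV6_AUTOFLOWLABEL 70	/* from linux/in6.h */
#endif

int main(void)
{
	int fd = socket(AF_INET6, SOCK_STREAM, 0);
	int on = 1;

	/* mode 1: labels may be turned off per socket (on = 0);
	 * mode 2: labels may be turned on per socket (on = 1);
	 * mode 3: the kernel enforces labels regardless of this call */
	if (fd < 0 ||
	    setsockopt(fd, IPPROTO_IPV6, IPV6_AUTOFLOWLABEL,
		       &on, sizeof(on)) < 0)
		return 1;
	return 0;
}
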
index e655e2453c9842e8bc9d28cce3b61e79a54c1fbf..2903b1cf4d702c639fe2d43056fcf9dca1a641d9 100644 (file)
@@ -135,12 +135,8 @@ struct plat_stmmacenet_data {
        int maxmtu;
        void (*fix_mac_speed)(void *priv, unsigned int speed);
        void (*bus_setup)(void __iomem *ioaddr);
-       void *(*setup)(struct platform_device *pdev);
-       void (*free)(struct platform_device *pdev, void *priv);
        int (*init)(struct platform_device *pdev, void *priv);
        void (*exit)(struct platform_device *pdev, void *priv);
-       void *custom_cfg;
-       void *custom_data;
        void *bsp_priv;
 };
 
@@ -179,15 +175,11 @@ Where:
  o bus_setup: perform HW setup of the bus. For example, on some ST platforms
             this field is used to configure the AMBA  bridge to generate more
             efficient STBus traffic.
- o setup/init/exit: callbacks used for calling a custom initialization;
+ o init/exit: callbacks used for calling a custom initialization;
             this is sometimes necessary on some platforms (e.g. ST boxes)
             where the HW needs to have set some PIO lines or system cfg
-            registers. setup should return a pointer to private data,
-            which will be stored in bsp_priv, and then passed to init and
-            exit callbacks. init/exit callbacks should not use or modify
+            registers.  init/exit callbacks should not use or modify
             platform data.
- o custom_cfg/custom_data: this is a custom configuration that can be passed
-                          while initializing the resources.
  o bsp_priv: another private pointer.
 
 For the MDIO bus we have:
@@ -278,8 +270,6 @@ capability register can replace what has been passed from the platform.
 Please see the following document:
        Documentation/devicetree/bindings/net/stmmac.txt
 
-and the stmmac_of_data structure inside the include/linux/stmmac.h header file.
-
 4.11) This is a summary of the content of some relevant files:
  o stmmac_main.c: to implement the main network device driver;
  o stmmac_mdio.c: to provide mdio functions;
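
A hedged sketch of a board file wiring up the surviving init/exit
callbacks and bsp_priv described above; the my_board_* names are
illustrative, only the struct fields come from the header:

#include <linux/platform_device.h>
#include <linux/stmmac.h>

static int my_board_init(struct platform_device *pdev, void *priv)
{
	/* e.g. set PIO lines or system cfg registers; must not use or
	 * modify platform data, per the rule above */
	return 0;
}

static void my_board_exit(struct platform_device *pdev, void *priv)
{
	/* undo whatever my_board_init() configured */
}

static struct plat_stmmacenet_data my_board_plat_data = {
	.init     = my_board_init,
	.exit     = my_board_exit,
	.bsp_priv = NULL,	/* handed to init/exit as @priv */
};
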
index c5d7ade10ff21b720c0c41e7fb9f4106a11ada09..9825f32a86349c1469a114d0aa85318115fac1e0 100644 (file)
@@ -279,8 +279,18 @@ and unknown unicast packets to all ports in domain, if allowed by port's
 current STP state.  The switch driver, knowing which ports are within which
 vlan L2 domain, can program the switch device for flooding.  The packet should
 also be sent to the port netdev for processing by the bridge driver.  The
-bridge should not reflood the packet to the same ports the device flooded.
-XXX: the mechanism to avoid duplicate flood packets is being discuseed.
+bridge should not reflood the packet to the same ports the device flooded,
+otherwise there will be duplicate packets on the wire.
+
+To avoid duplicate packets, the device/driver should mark a packet as already
+forwarded using skb->offload_fwd_mark.  The same mark is set on the device
+ports in the domain using dev->offload_fwd_mark.  If the skb->offload_fwd_mark
+is non-zero and matches the forwarding egress port's dev->offload_fwd_mark,
+the kernel will drop the skb right before transmit on the egress port, with
+the understanding that the device already forwarded the packet on the same
+egress port.
+The driver can use switchdev_port_fwd_mark_set() to set a globally unique mark
+for port's dev->offload_fwd_mark, based on the port's parent ID (switch ID) and
+a group ifindex.
 
 It is possible for the switch device to not handle flooding and push the
 packets up to the bridge driver for flooding.  This is not ideal as the number
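
A hedged sketch of the rx-path marking described above (struct
example_port and the function are illustrative, not from a real driver;
only the two offload_fwd_mark fields come from the text):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct example_port {
	struct net_device *netdev;
};

static void example_port_rx(struct example_port *port, struct sk_buff *skb)
{
	/* Mark the skb as already forwarded in hardware; the kernel
	 * drops it before transmit on any port whose
	 * dev->offload_fwd_mark matches. */
	skb->offload_fwd_mark = port->netdev->offload_fwd_mark;

	netif_receive_skb(skb);
}
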
index 5f0922613f1a8db7963fe3c44d21280d7290d84e..a977339fbe0a50d94877fdb8d9f2e8d902699612 100644 (file)
@@ -359,6 +359,13 @@ the requested fine-grained filtering for incoming packets is not
 supported, the driver may time stamp more than just the requested types
 of packets.
 
+Drivers are free to use a more permissive configuration than the requested
+configuration. Drivers are expected to implement directly only the most
+generic mode that can be supported. For example, if the hardware can
+support HWTSTAMP_FILTER_V2_EVENT, then it should generally always upscale
+HWTSTAMP_FILTER_V2_L2_SYNC_MESSAGE, and so forth, as HWTSTAMP_FILTER_V2_EVENT
+is more generic (and more useful to applications).
+
 A driver which supports hardware time stamping shall update the struct
 with the actual, possibly more permissive configuration. If the
 requested packets cannot be time stamped, then nothing should be
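
A hedged userspace sketch of this negotiation: request a specific
filter and accept the possibly upscaled result. The interface name is
an assumption, and the code uses the uapi enum spellings
(HWTSTAMP_FILTER_PTP_V2_*) that the shorthand in the text refers to:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_rx_timestamps(int fd)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_OFF;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;	/* specific request */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		return -1;

	/* The driver writes back the filter actually in effect, which
	 * may be the more generic HWTSTAMP_FILTER_PTP_V2_EVENT. */
	return cfg.rx_filter;
}
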
index 949de191fcdc1939c9b160eb7a809afcdbc847af..cda56df9b8a7ce591f3eb254cad1d423c0a856c5 100755 (executable)
@@ -199,7 +199,8 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "#include <linux/string.h>\n"
        buf += "#include <linux/configfs.h>\n"
        buf += "#include <linux/ctype.h>\n"
-       buf += "#include <asm/unaligned.h>\n\n"
+       buf += "#include <asm/unaligned.h>\n"
+       buf += "#include <scsi/scsi_proto.h>\n\n"
        buf += "#include <target/target_core_base.h>\n"
        buf += "#include <target/target_core_fabric.h>\n"
        buf += "#include <target/target_core_fabric_configfs.h>\n"
@@ -230,8 +231,14 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        }\n"
        buf += "        tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
        buf += "        tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
-       buf += "        ret = core_tpg_register(&" + fabric_mod_name + "_ops, wwn,\n"
-       buf += "                                &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
+
+       if proto_ident == "FC":
+               buf += "        ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
+       elif proto_ident == "SAS":
+               buf += "        ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
+       elif proto_ident == "iSCSI":
+               buf += "        ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"
+
        buf += "        if (ret < 0) {\n"
        buf += "                kfree(tpg);\n"
        buf += "                return NULL;\n"
@@ -292,7 +299,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 
        buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
        buf += "        .module                         = THIS_MODULE,\n"
-       buf += "        .name                           = " + fabric_mod_name + ",\n"
+       buf += "        .name                           = \"" + fabric_mod_name + "\",\n"
        buf += "        .get_fabric_name                = " + fabric_mod_name + "_get_fabric_name,\n"
        buf += "        .tpg_get_wwn                    = " + fabric_mod_name + "_get_fabric_wwn,\n"
        buf += "        .tpg_get_tag                    = " + fabric_mod_name + "_get_tag,\n"
@@ -322,17 +329,17 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        .fabric_make_tpg                = " + fabric_mod_name + "_make_tpg,\n"
        buf += "        .fabric_drop_tpg                = " + fabric_mod_name + "_drop_tpg,\n"
        buf += "\n"
-       buf += "        .tfc_wwn_attrs                  = " + fabric_mod_name + "_wwn_attrs;\n"
+       buf += "        .tfc_wwn_attrs                  = " + fabric_mod_name + "_wwn_attrs,\n"
        buf += "};\n\n"
 
        buf += "static int __init " + fabric_mod_name + "_init(void)\n"
        buf += "{\n"
-       buf += "        return target_register_template(" + fabric_mod_name + "_ops);\n"
+       buf += "        return target_register_template(&" + fabric_mod_name + "_ops);\n"
        buf += "};\n\n"
 
        buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
        buf += "{\n"
-       buf += "        target_unregister_template(" + fabric_mod_name + "_ops);\n"
+       buf += "        target_unregister_template(&" + fabric_mod_name + "_ops);\n"
        buf += "};\n\n"
 
        buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
index 2d3d55c8f5bea3180f4304bddd7b5c59106e79db..98ede02a96f2bf5666ab2cf14ef4caceb0fdf9cb 100644 (file)
@@ -361,11 +361,11 @@ S:        Supported
 F:     drivers/input/touchscreen/ad7879.c
 
 ADDRESS SPACE LAYOUT RANDOMIZATION (ASLR)
-M:     Jiri Kosina <jkosina@suse.cz>
+M:     Jiri Kosina <jkosina@suse.com>
 S:     Maintained
 
 ADM1025 HARDWARE MONITOR DRIVER
-M:     Jean Delvare <jdelvare@suse.de>
+M:     Jean Delvare <jdelvare@suse.com>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/adm1025
@@ -430,7 +430,7 @@ S:  Maintained
 F:     drivers/macintosh/therm_adt746x.c
 
 ADT7475 HARDWARE MONITOR DRIVER
-M:     Jean Delvare <jdelvare@suse.de>
+M:     Jean Delvare <jdelvare@suse.com>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/adt7475
@@ -445,7 +445,7 @@ F:  drivers/input/misc/adxl34x.c
 
 ADVANSYS SCSI DRIVER
 M:     Matthew Wilcox <matthew@wil.cx>
-M:     Hannes Reinecke <hare@suse.de>
+M:     Hannes Reinecke <hare@suse.com>
 L:     linux-scsi@vger.kernel.org
 S:     Maintained
 F:     Documentation/scsi/advansys.txt
@@ -506,7 +506,7 @@ F:  drivers/scsi/aha152x*
 F:     drivers/scsi/pcmcia/aha152x*
 
 AIC7XXX / AIC79XX SCSI DRIVER
-M:     Hannes Reinecke <hare@suse.de>
+M:     Hannes Reinecke <hare@suse.com>
 L:     linux-scsi@vger.kernel.org
 S:     Maintained
 F:     drivers/scsi/aic7xxx/
@@ -746,7 +746,7 @@ S:  Maintained
 F:     sound/aoa/
 
 APM DRIVER
-M:     Jiri Kosina <jkosina@suse.cz>
+M:     Jiri Kosina <jkosina@suse.com>
 S:     Odd fixes
 F:     arch/x86/kernel/apm_32.c
 F:     include/linux/apm_bios.h
@@ -1001,6 +1001,7 @@ ARM/CONEXANT DIGICOLOR MACHINE SUPPORT
 M:     Baruch Siach <baruch@tkos.co.il>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
+F:     arch/arm/boot/dts/cx92755*
 N:     digicolor
 
 ARM/EBSA110 MACHINE SUPPORT
@@ -1324,7 +1325,7 @@ F:        arch/arm/mach-pxa/include/mach/palmtc.h
 F:     arch/arm/mach-pxa/palmtc.c
 
 ARM/PALM TREO SUPPORT
-M:     Tomas Cech <sleep_walker@suse.cz>
+M:     Tomas Cech <sleep_walker@suse.com>
 L:     linux-arm-kernel@lists.infradead.org
 W:     http://hackndev.com
 S:     Maintained
@@ -2405,7 +2406,7 @@ F:        drivers/gpio/gpio-bt8xx.c
 BTRFS FILE SYSTEM
 M:     Chris Mason <clm@fb.com>
 M:     Josef Bacik <jbacik@fb.com>
-M:     David Sterba <dsterba@suse.cz>
+M:     David Sterba <dsterba@suse.com>
 L:     linux-btrfs@vger.kernel.org
 W:     http://btrfs.wiki.kernel.org/
 Q:     http://patchwork.kernel.org/project/linux-btrfs/list/
@@ -2748,7 +2749,7 @@ COCCINELLE/Semantic Patches (SmPL)
 M:     Julia Lawall <Julia.Lawall@lip6.fr>
 M:     Gilles Muller <Gilles.Muller@lip6.fr>
 M:     Nicolas Palix <nicolas.palix@imag.fr>
-M:     Michal Marek <mmarek@suse.cz>
+M:     Michal Marek <mmarek@suse.com>
 L:     cocci@systeme.lip6.fr (moderated for non-subscribers)
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git misc
 W:     http://coccinelle.lip6.fr/
@@ -2864,7 +2865,7 @@ F:        kernel/cpuset.c
 
 CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
 M:     Johannes Weiner <hannes@cmpxchg.org>
-M:     Michal Hocko <mhocko@suse.cz>
+M:     Michal Hocko <mhocko@kernel.org>
 L:     cgroups@vger.kernel.org
 L:     linux-mm@kvack.org
 S:     Maintained
@@ -2945,7 +2946,7 @@ F:        arch/x86/kernel/cpuid.c
 F:     arch/x86/kernel/msr.c
 
 CPU POWER MONITORING SUBSYSTEM
-M:     Thomas Renninger <trenn@suse.de>
+M:     Thomas Renninger <trenn@suse.com>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
 F:     tools/power/cpupower/
@@ -3175,7 +3176,7 @@ F:        Documentation/networking/dmfe.txt
 F:     drivers/net/ethernet/dec/tulip/dmfe.c
 
 DC390/AM53C974 SCSI driver
-M:     Hannes Reinecke <hare@suse.de>
+M:     Hannes Reinecke <hare@suse.com>
 L:     linux-scsi@vger.kernel.org
 S:     Maintained
 F:     drivers/scsi/am53c974.c
@@ -3379,7 +3380,7 @@ W:        http://www.win.tue.nl/~aeb/partitions/partition_types-1.html
 S:     Maintained
 
 DISKQUOTA
-M:     Jan Kara <jack@suse.cz>
+M:     Jan Kara <jack@suse.com>
 S:     Maintained
 F:     Documentation/filesystems/quota.txt
 F:     fs/quota/
@@ -3435,7 +3436,7 @@ F:        Documentation/hwmon/dme1737
 F:     drivers/hwmon/dme1737.c
 
 DMI/SMBIOS SUPPORT
-M:     Jean Delvare <jdelvare@suse.de>
+M:     Jean Delvare <jdelvare@suse.com>
 S:     Maintained
 T:     quilt http://jdelvare.nerim.net/devel/linux/jdelvare-dmi/
 F:     Documentation/ABI/testing/sysfs-firmware-dmi-tables
@@ -4051,7 +4052,7 @@ F:        drivers/of/of_mdio.c
 F:     drivers/of/of_net.c
 
 EXT2 FILE SYSTEM
-M:     Jan Kara <jack@suse.cz>
+M:     Jan Kara <jack@suse.com>
 L:     linux-ext4@vger.kernel.org
 S:     Maintained
 F:     Documentation/filesystems/ext2.txt
@@ -4059,7 +4060,7 @@ F:        fs/ext2/
 F:     include/linux/ext2*
 
 EXT3 FILE SYSTEM
-M:     Jan Kara <jack@suse.cz>
+M:     Jan Kara <jack@suse.com>
 M:     Andrew Morton <akpm@linux-foundation.org>
 M:     Andreas Dilger <adilger.kernel@dilger.ca>
 L:     linux-ext4@vger.kernel.org
@@ -4109,7 +4110,7 @@ F:        drivers/video/fbdev/exynos/exynos_mipi*
 F:     include/video/exynos_mipi*
 
 F71805F HARDWARE MONITORING DRIVER
-M:     Jean Delvare <jdelvare@suse.de>
+M:     Jean Delvare <jdelvare@suse.com>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/f71805f
@@ -4244,7 +4245,7 @@ S:        Maintained
 F:     drivers/block/rsxx/
 
 FLOPPY DRIVER
-M:     Jiri Kosina <jkosina@suse.cz>
+M:     Jiri Kosina <jkosina@suse.com>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git
 S:     Odd fixes
 F:     drivers/block/floppy.c
@@ -4665,7 +4666,7 @@ F:        drivers/media/usb/stk1160/
 
 H8/300 ARCHITECTURE
 M:     Yoshinori Sato <ysato@users.sourceforge.jp>
-L:     uclinux-h8-devel@lists.sourceforge.jp
+L:     uclinux-h8-devel@lists.sourceforge.jp (moderated for non-subscribers)
 W:     http://uclinux-h8.sourceforge.jp
 T:     git git://git.sourceforge.jp/gitroot/uclinux-h8/linux.git
 S:     Maintained
@@ -4712,7 +4713,7 @@ S:        Maintained
 F:     drivers/media/usb/hackrf/
 
 HARDWARE MONITORING
-M:     Jean Delvare <jdelvare@suse.de>
+M:     Jean Delvare <jdelvare@suse.com>
 M:     Guenter Roeck <linux@roeck-us.net>
 L:     lm-sensors@lm-sensors.org
 W:     http://www.lm-sensors.org/
@@ -4815,7 +4816,7 @@ F:        include/linux/pm.h
 F:     arch/*/include/asm/suspend*.h
 
 HID CORE LAYER
-M:     Jiri Kosina <jkosina@suse.cz>
+M:     Jiri Kosina <jkosina@suse.com>
 L:     linux-input@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
 S:     Maintained
@@ -4824,7 +4825,7 @@ F:        include/linux/hid*
 F:     include/uapi/linux/hid*
 
 HID SENSOR HUB DRIVERS
-M:     Jiri Kosina <jkosina@suse.cz>
+M:     Jiri Kosina <jkosina@suse.com>
 M:     Jonathan Cameron <jic23@kernel.org>
 M:     Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
 L:     linux-input@vger.kernel.org
@@ -4958,7 +4959,7 @@ F:        include/linux/hyperv.h
 F:     tools/hv/
 
 I2C OVER PARALLEL PORT
-M:     Jean Delvare <jdelvare@suse.de>
+M:     Jean Delvare <jdelvare@suse.com>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     Documentation/i2c/busses/i2c-parport
@@ -4967,7 +4968,7 @@ F:        drivers/i2c/busses/i2c-parport.c
 F:     drivers/i2c/busses/i2c-parport-light.c
 
 I2C/SMBUS CONTROLLER DRIVERS FOR PC
-M:     Jean Delvare <jdelvare@suse.de>
+M:     Jean Delvare <jdelvare@suse.com>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     Documentation/i2c/busses/i2c-ali1535
@@ -5008,7 +5009,7 @@ F:        drivers/i2c/busses/i2c-ismt.c
 F:     Documentation/i2c/busses/i2c-ismt
 
 I2C/SMBUS STUB DRIVER
-M:     Jean Delvare <jdelvare@suse.de>
+M:     Jean Delvare <jdelvare@suse.com>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     drivers/i2c/i2c-stub.c
@@ -5035,7 +5036,7 @@ L:        linux-acpi@vger.kernel.org
 S:     Maintained
 
 I2C-TAOS-EVM DRIVER
-M:     Jean Delvare <jdelvare@suse.de>
+M:     Jean Delvare <jdelvare@suse.com>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     Documentation/i2c/busses/i2c-taos-evm
@@ -5564,8 +5565,8 @@ F:        include/uapi/linux/ip_vs.h
 F:     net/netfilter/ipvs/
 
 IPWIRELESS DRIVER
-M:     Jiri Kosina <jkosina@suse.cz>
-M:     David Sterba <dsterba@suse.cz>
+M:     Jiri Kosina <jkosina@suse.com>
+M:     David Sterba <dsterba@suse.com>
 S:     Odd Fixes
 F:     drivers/tty/ipwireless/
 
@@ -5685,7 +5686,7 @@ S:        Maintained
 F:     drivers/isdn/hardware/eicon/
 
 IT87 HARDWARE MONITORING DRIVER
-M:     Jean Delvare <jdelvare@suse.de>
+M:     Jean Delvare <jdelvare@suse.com>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/it87
@@ -5752,7 +5753,7 @@ F:        include/uapi/linux/jffs2.h
 
 JOURNALLING LAYER FOR BLOCK DEVICES (JBD)
 M:     Andrew Morton <akpm@linux-foundation.org>
-M:     Jan Kara <jack@suse.cz>
+M:     Jan Kara <jack@suse.com>
 L:     linux-ext4@vger.kernel.org
 S:     Maintained
 F:     fs/jbd/
@@ -5816,7 +5817,7 @@ S:        Maintained
 F:     fs/autofs4/
 
 KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
-M:     Michal Marek <mmarek@suse.cz>
+M:     Michal Marek <mmarek@suse.com>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git for-next
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git rc-fixes
 L:     linux-kbuild@vger.kernel.org
@@ -5880,7 +5881,7 @@ F:        arch/x86/include/asm/svm.h
 F:     arch/x86/kvm/svm.c
 
 KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
-M:     Alexander Graf <agraf@suse.de>
+M:     Alexander Graf <agraf@suse.com>
 L:     kvm-ppc@vger.kernel.org
 W:     http://kvm.qumranet.com
 T:     git git://github.com/agraf/linux-2.6.git
@@ -5898,7 +5899,6 @@ S:        Supported
 F:     Documentation/s390/kvm.txt
 F:     arch/s390/include/asm/kvm*
 F:     arch/s390/kvm/
-F:     drivers/s390/kvm/
 
 KERNEL VIRTUAL MACHINE (KVM) FOR ARM
 M:     Christoffer Dall <christoffer.dall@linaro.org>
@@ -6037,7 +6037,7 @@ F:        drivers/leds/
 F:     include/linux/leds.h
 
 LEGACY EEPROM DRIVER
-M:     Jean Delvare <jdelvare@suse.de>
+M:     Jean Delvare <jdelvare@suse.com>
 S:     Maintained
 F:     Documentation/misc-devices/eeprom
 F:     drivers/misc/eeprom/eeprom.c
@@ -6090,7 +6090,7 @@ F:        include/linux/ata.h
 F:     include/linux/libata.h
 
 LIBATA PATA ARASAN COMPACT FLASH CONTROLLER
-M:     Viresh Kumar <viresh.linux@gmail.com>
+M:     Viresh Kumar <vireshk@kernel.org>
 L:     linux-ide@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
 S:     Maintained
@@ -6251,8 +6251,8 @@ F:        drivers/platform/x86/hp_accel.c
 LIVE PATCHING
 M:     Josh Poimboeuf <jpoimboe@redhat.com>
 M:     Seth Jennings <sjenning@redhat.com>
-M:     Jiri Kosina <jkosina@suse.cz>
-M:     Vojtech Pavlik <vojtech@suse.cz>
+M:     Jiri Kosina <jkosina@suse.com>
+M:     Vojtech Pavlik <vojtech@suse.com>
 S:     Maintained
 F:     kernel/livepatch/
 F:     include/linux/livepatch.h
@@ -6278,21 +6278,21 @@ S:      Maintained
 F:     drivers/hwmon/lm73.c
 
 LM78 HARDWARE MONITOR DRIVER
-M:     Jean Delvare <jdelvare@suse.de>
+M:     Jean Delvare <jdelvare@suse.com>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/lm78
 F:     drivers/hwmon/lm78.c
 
 LM83 HARDWARE MONITOR DRIVER
-M:     Jean Delvare <jdelvare@suse.de>
+M:     Jean Delvare <jdelvare@suse.com>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/lm83
 F:     drivers/hwmon/lm83.c
 
 LM90 HARDWARE MONITOR DRIVER
-M:     Jean Delvare <jdelvare@suse.de>
+M:     Jean Delvare <jdelvare@suse.com>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/lm90
@@ -6506,7 +6506,7 @@ F:        drivers/net/ethernet/marvell/mvneta.*
 
 MARVELL MWIFIEX WIRELESS DRIVER
 M:     Amitkumar Karwar <akarwar@marvell.com>
-M:     Avinash Patil <patila@marvell.com>
+M:     Nishant Sarmukadam <nishants@marvell.com>
 L:     linux-wireless@vger.kernel.org
 S:     Maintained
 F:     drivers/net/wireless/mwifiex/
@@ -6644,6 +6644,15 @@ W:       http://www.mellanox.com
 Q:     http://patchwork.ozlabs.org/project/netdev/list/
 F:     drivers/net/ethernet/mellanox/mlx4/en_*
 
+MELLANOX ETHERNET SWITCH DRIVERS
+M:     Jiri Pirko <jiri@mellanox.com>
+M:     Ido Schimmel <idosch@mellanox.com>
+L:     netdev@vger.kernel.org
+S:     Supported
+W:     http://www.mellanox.com
+Q:     http://patchwork.ozlabs.org/project/netdev/list/
+F:     drivers/net/ethernet/mellanox/mlxsw/
+
 MEMORY MANAGEMENT
 L:     linux-mm@kvack.org
 W:     http://www.linux-mm.org
@@ -6838,6 +6847,12 @@ T:       git git://linuxtv.org/anttip/media_tree.git
 S:     Maintained
 F:     drivers/media/usb/msi2500/
 
+MSYSTEMS DISKONCHIP G3 MTD DRIVER
+M:     Robert Jarzmik <robert.jarzmik@free.fr>
+L:     linux-mtd@lists.infradead.org
+S:     Maintained
+F:     drivers/mtd/devices/docg3*
+
 MT9M032 APTINA SENSOR DRIVER
 M:     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 L:     linux-media@vger.kernel.org
@@ -7719,7 +7734,7 @@ S:        Maintained
 F:     drivers/char/pc8736x_gpio.c
 
 PC87427 HARDWARE MONITORING DRIVER
-M:     Jean Delvare <jdelvare@suse.de>
+M:     Jean Delvare <jdelvare@suse.com>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/pc87427
@@ -7996,7 +8011,7 @@ S:        Maintained
 F:     drivers/pinctrl/samsung/
 
 PIN CONTROLLER - ST SPEAR
-M:     Viresh Kumar <viresh.linux@gmail.com>
+M:     Viresh Kumar <vireshk@kernel.org>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.st.com/spear
@@ -8004,7 +8019,7 @@ S:        Maintained
 F:     drivers/pinctrl/spear/
 
 PKTCDVD DRIVER
-M:     Jiri Kosina <jkosina@suse.cz>
+M:     Jiri Kosina <jkosina@suse.com>
 S:     Maintained
 F:     drivers/block/pktcdvd.c
 F:     include/linux/pktcdvd.h
@@ -8895,13 +8910,20 @@ S:      Maintained
 F:     drivers/tty/serial/
 
 SYNOPSYS DESIGNWARE DMAC DRIVER
-M:     Viresh Kumar <viresh.linux@gmail.com>
+M:     Viresh Kumar <vireshk@kernel.org>
 M:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 S:     Maintained
 F:     include/linux/dma/dw.h
 F:     include/linux/platform_data/dma-dw.h
 F:     drivers/dma/dw/
 
+SYNOPSYS DESIGNWARE ETHERNET QOS 4.10a driver
+M:     Lars Persson <lars.persson@axis.com>
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
+F:     drivers/net/ethernet/synopsys/dwc_eth_qos.c
+
 SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
 M:     Seungwon Jeon <tgih.jun@samsung.com>
 M:     Jaehoon Chung <jh80.chung@samsung.com>
@@ -9062,7 +9084,7 @@ S:        Maintained
 F:     drivers/mmc/host/sdhci-s3c*
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) ST SPEAR DRIVER
-M:     Viresh Kumar <viresh.linux@gmail.com>
+M:     Viresh Kumar <vireshk@kernel.org>
 L:     spear-devel@list.st.com
 L:     linux-mmc@vger.kernel.org
 S:     Maintained
@@ -9424,7 +9446,7 @@ F:        Documentation/hwmon/sch5627
 F:     drivers/hwmon/sch5627.c
 
 SMSC47B397 HARDWARE MONITOR DRIVER
-M:     Jean Delvare <jdelvare@suse.de>
+M:     Jean Delvare <jdelvare@suse.com>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/smsc47b397
@@ -9473,7 +9495,7 @@ S:        Supported
 F:     drivers/media/pci/solo6x10/
 
 SOFTWARE RAID (Multiple Disks) SUPPORT
-M:     Neil Brown <neilb@suse.de>
+M:     Neil Brown <neilb@suse.com>
 L:     linux-raid@vger.kernel.org
 S:     Supported
 F:     drivers/md/
@@ -9516,7 +9538,7 @@ F:        drivers/memstick/core/ms_block.*
 
 SOUND
 M:     Jaroslav Kysela <perex@perex.cz>
-M:     Takashi Iwai <tiwai@suse.de>
+M:     Takashi Iwai <tiwai@suse.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 W:     http://www.alsa-project.org/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
@@ -9600,7 +9622,7 @@ S:        Maintained
 F:     include/linux/compiler.h
 
 SPEAR PLATFORM SUPPORT
-M:     Viresh Kumar <viresh.linux@gmail.com>
+M:     Viresh Kumar <vireshk@kernel.org>
 M:     Shiraz Hashim <shiraz.linux.kernel@gmail.com>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -9609,7 +9631,7 @@ S:        Maintained
 F:     arch/arm/mach-spear/
 
 SPEAR CLOCK FRAMEWORK SUPPORT
-M:     Viresh Kumar <viresh.linux@gmail.com>
+M:     Viresh Kumar <vireshk@kernel.org>
 L:     spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.st.com/spear
@@ -10399,7 +10421,7 @@ K:      ^Subject:.*(?i)trivial
 
 TTY LAYER
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-M:     Jiri Slaby <jslaby@suse.cz>
+M:     Jiri Slaby <jslaby@suse.com>
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty.git
 F:     Documentation/serial/
@@ -10473,7 +10495,7 @@ F:      arch/m68k/*/*_no.*
 F:     arch/m68k/include/asm/*_no.*
 
 UDF FILESYSTEM
-M:     Jan Kara <jack@suse.cz>
+M:     Jan Kara <jack@suse.com>
 S:     Maintained
 F:     Documentation/filesystems/udf.txt
 F:     fs/udf/
@@ -10616,7 +10638,7 @@ F:      drivers/usb/gadget/
 F:     include/linux/usb/gadget*
 
 USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...)
-M:     Jiri Kosina <jkosina@suse.cz>
+M:     Jiri Kosina <jkosina@suse.com>
 L:     linux-usb@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
 S:     Maintained
@@ -10741,7 +10763,7 @@ S:      Maintained
 F:     drivers/usb/host/uhci*
 
 USB "USBNET" DRIVER FRAMEWORK
-M:     Oliver Neukum <oneukum@suse.de>
+M:     Oliver Neukum <oneukum@suse.com>
 L:     netdev@vger.kernel.org
 W:     http://www.linux-usb.org/usbnet
 S:     Maintained
@@ -10895,6 +10917,15 @@ F:     drivers/block/virtio_blk.c
 F:     include/linux/virtio_*.h
 F:     include/uapi/linux/virtio_*.h
 
+VIRTIO DRIVERS FOR S390
+M:     Christian Borntraeger <borntraeger@de.ibm.com>
+M:     Cornelia Huck <cornelia.huck@de.ibm.com>
+L:     linux-s390@vger.kernel.org
+L:     virtualization@lists.linux-foundation.org
+L:     kvm@vger.kernel.org
+S:     Supported
+F:     drivers/s390/virtio/
+
 VIRTIO GPU DRIVER
 M:     David Airlie <airlied@linux.ie>
 M:     Gerd Hoffmann <kraxel@redhat.com>
@@ -11068,7 +11099,7 @@ F:      Documentation/hwmon/w83793
 F:     drivers/hwmon/w83793.c
 
 W83795 HARDWARE MONITORING DRIVER
-M:     Jean Delvare <jdelvare@suse.de>
+M:     Jean Delvare <jdelvare@suse.com>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     drivers/hwmon/w83795.c
index 257ef5892ab7483c973e9fbcbd2d76e02af34e50..afabc44a349b7b31a2028660e7e61b134556a675 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
@@ -780,10 +780,11 @@ endif
 include scripts/Makefile.kasan
 include scripts/Makefile.extrawarn
 
-# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
-KBUILD_CPPFLAGS += $(KCPPFLAGS)
-KBUILD_AFLAGS += $(KAFLAGS)
-KBUILD_CFLAGS += $(KCFLAGS)
+# Add any arch overrides and user supplied CPPFLAGS, AFLAGS and CFLAGS as the
+# last assignments
+KBUILD_CPPFLAGS += $(ARCH_CPPFLAGS) $(KCPPFLAGS)
+KBUILD_AFLAGS   += $(ARCH_AFLAGS)   $(KAFLAGS)
+KBUILD_CFLAGS   += $(ARCH_CFLAGS)   $(KCFLAGS)
 
 # Use --build-id when available.
 LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\
index bec6666a3cc466c81baea023dcd010a65d3d8181..8a8ea7110de84482a5c33e53a50d0fc908998f3e 100644 (file)
@@ -221,6 +221,10 @@ config ARCH_TASK_STRUCT_ALLOCATOR
 config ARCH_THREAD_INFO_ALLOCATOR
        bool
 
+# Select if arch wants to size task_struct dynamically via arch_task_struct_size:
+config ARCH_WANTS_DYNAMIC_TASK_STRUCT
+       bool
+
 config HAVE_REGS_AND_STACK_ACCESS_API
        bool
        help
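
For reference, a hedged sketch of the pattern the new
ARCH_WANTS_DYNAMIC_TASK_STRUCT option enables: the arch sets
arch_task_struct_size at boot and core code sizes the task_struct slab
from it instead of sizeof(). The function below is illustrative,
loosely mirroring kernel/fork.c:

#include <linux/cache.h>
#include <linux/init.h>
#include <linux/slab.h>

extern unsigned long arch_task_struct_size;

static struct kmem_cache *task_struct_cachep;

static void __init example_task_cache_init(void)
{
	/* size comes from the arch-set variable, not sizeof() */
	task_struct_cachep = kmem_cache_create("task_struct",
					       arch_task_struct_size,
					       L1_CACHE_BYTES,
					       SLAB_PANIC, NULL);
}
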
index cde23cd03609afe5fce911421cc384485368ba64..ffd9cf5ec8c407c4686c519de89b3e827f1656a2 100644 (file)
@@ -5,6 +5,7 @@ generic-y += cputime.h
 generic-y += exec.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += sections.h
 generic-y += trace_clock.h
diff --git a/arch/alpha/include/asm/mm-arch-hooks.h b/arch/alpha/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index b07fd86..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_ALPHA_MM_ARCH_HOOKS_H
-#define _ASM_ALPHA_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_ALPHA_MM_ARCH_HOOKS_H */
index e7cee0a5c56dfa80222d8286a63342b10c07bc42..91cf4055acab0439e564a96056012befd5fb4c36 100644 (file)
@@ -115,6 +115,7 @@ if ISA_ARCOMPACT
 
 config ARC_CPU_750D
        bool "ARC750D"
+       select ARC_CANT_LLSC
        help
          Support for ARC750 core
 
@@ -362,7 +363,7 @@ config ARC_CANT_LLSC
 config ARC_HAS_LLSC
        bool "Insn: LLOCK/SCOND (efficient atomic ops)"
        default y
-       depends on !ARC_CPU_750D && !ARC_CANT_LLSC
+       depends on !ARC_CANT_LLSC
 
 config ARC_HAS_SWAPE
        bool "Insn: SWAPE (endian-swap)"
index 6107062c01115dbea8a56e02bce254a8ba5b91af..46d87310220dadaf96be4ff08c42b240d2eb4916 100644 (file)
@@ -49,7 +49,8 @@ endif
 
 ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
 # Generic build system uses -O2, we want -O3
-cflags-y  += -O3
+# Note: No need to add to cflags-y as that happens anyway
+ARCH_CFLAGS += -O3
 endif
 
 # small data is default for elf32 tool-chain. If not usable, disable it
index 15c8d6226c9d8508b54fbecc6547f5c0a0529e14..1cd5e82f5dc2c6f74cb36d40632998fd26195426 100644 (file)
@@ -12,7 +12,7 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <75000000>;
+       clock-frequency = <90000000>;
        #address-cells = <1>;
        #size-cells = <1>;
 
index 199d42820eca784b4bd5955d3a7420bc53697774..2f0b33257db2e2ecf4749bee0d2f5ed3260dc3a3 100644 (file)
@@ -12,7 +12,7 @@
 
 / {
        compatible = "snps,arc";
-       clock-frequency = <75000000>;
+       clock-frequency = <90000000>;
        #address-cells = <1>;
        #size-cells = <1>;
 
index 1a80cc91a03ba323f8418dfb705098a00f031671..7611b10a2d238c7b4bb73696b59e2fe8b14ceb2c 100644 (file)
@@ -22,6 +22,7 @@ generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += mman.h
 generic-y += msgbuf.h
 generic-y += param.h
index 99fe118d3730bc050263e5be7dd3423ab659d46c..57c1f33844d44f1f9d16ed448ce0b13e87b88cea 100644 (file)
@@ -50,8 +50,7 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
         * done for const @nr, but no code is generated due to gcc      \
         * const prop.                                                  \
         */                                                             \
-       if (__builtin_constant_p(nr))                                   \
-               nr &= 0x1f;                                             \
+       nr &= 0x1f;                                                     \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     llock       %0, [%1]            \n"                     \
@@ -82,8 +81,7 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *
                                                                        \
        m += nr >> 5;                                                   \
                                                                        \
-       if (__builtin_constant_p(nr))                                   \
-               nr &= 0x1f;                                             \
+       nr &= 0x1f;                                                     \
                                                                        \
        /*                                                              \
         * Explicit full memory barrier needed before/after as          \
@@ -129,16 +127,13 @@ static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
        unsigned long temp, flags;                                      \
        m += nr >> 5;                                                   \
                                                                        \
-       if (__builtin_constant_p(nr))                                   \
-               nr &= 0x1f;                                             \
-                                                                       \
        /*                                                              \
         * spin lock/unlock provide the needed smp_mb() before/after    \
         */                                                             \
        bitops_lock(flags);                                             \
                                                                        \
        temp = *m;                                                      \
-       *m = temp c_op (1UL << nr);                                     \
+       *m = temp c_op (1UL << (nr & 0x1f));                            \
                                                                        \
        bitops_unlock(flags);                                           \
 }
@@ -149,17 +144,14 @@ static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *
        unsigned long old, flags;                                       \
        m += nr >> 5;                                                   \
                                                                        \
-       if (__builtin_constant_p(nr))                                   \
-               nr &= 0x1f;                                             \
-                                                                       \
        bitops_lock(flags);                                             \
                                                                        \
        old = *m;                                                       \
-       *m = old c_op (1 << nr);                                        \
+       *m = old c_op (1UL << (nr & 0x1f));                             \
                                                                        \
        bitops_unlock(flags);                                           \
                                                                        \
-       return (old & (1 << nr)) != 0;                                  \
+       return (old & (1UL << (nr & 0x1f))) != 0;                       \
 }
 
 #endif /* CONFIG_ARC_HAS_LLSC */
@@ -174,11 +166,8 @@ static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m)       \
        unsigned long temp;                                             \
        m += nr >> 5;                                                   \
                                                                        \
-       if (__builtin_constant_p(nr))                                   \
-               nr &= 0x1f;                                             \
-                                                                       \
        temp = *m;                                                      \
-       *m = temp c_op (1UL << nr);                                     \
+       *m = temp c_op (1UL << (nr & 0x1f));                            \
 }
 
 #define __TEST_N_BIT_OP(op, c_op, asm_op)                              \
@@ -187,13 +176,10 @@ static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long
        unsigned long old;                                              \
        m += nr >> 5;                                                   \
                                                                        \
-       if (__builtin_constant_p(nr))                                   \
-               nr &= 0x1f;                                             \
-                                                                       \
        old = *m;                                                       \
-       *m = old c_op (1 << nr);                                        \
+       *m = old c_op (1UL << (nr & 0x1f));                             \
                                                                        \
-       return (old & (1 << nr)) != 0;                                  \
+       return (old & (1UL << (nr & 0x1f))) != 0;                       \
 }
 
 #define BIT_OPS(op, c_op, asm_op)                                      \
@@ -224,10 +210,7 @@ test_bit(unsigned int nr, const volatile unsigned long *addr)
 
        addr += nr >> 5;
 
-       if (__builtin_constant_p(nr))
-               nr &= 0x1f;
-
-       mask = 1 << nr;
+       mask = 1UL << (nr & 0x1f);
 
        return ((mask & *addr) != 0);
 }
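
The masking change above is uniform across the LLSC, lock-based and
non-atomic variants; a plain-C model of the word/bit addressing the
macros implement (names here are illustrative, not kernel API, and a
32-bit unsigned long is assumed, as on ARC):

/* A bit number splits into a word offset (nr >> 5) and a bit offset
 * within the 32-bit word (nr & 0x1f), so the shift count is always
 * below 32 and never undefined behaviour. */
static inline int model_test_bit(unsigned int nr, const unsigned long *addr)
{
	unsigned long mask;

	addr += nr >> 5;		/* select the 32-bit word */
	mask = 1UL << (nr & 0x1f);	/* select the bit within it */

	return (*addr & mask) != 0;
}
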
index 05b5aaf5b0f91e5580395e08ae778f5ddace5b3c..70cfe16b742d78f7e8016a41b11271e7d258f7a6 100644 (file)
 #include <linux/uaccess.h>
 #include <asm/errno.h>
 
+#ifdef CONFIG_ARC_HAS_LLSC
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
+                                                       \
+       __asm__ __volatile__(                           \
+       "1:     llock   %1, [%2]                \n"     \
+               insn                            "\n"    \
+       "2:     scond   %0, [%2]                \n"     \
+       "       bnz     1b                      \n"     \
+       "       mov %0, 0                       \n"     \
+       "3:                                     \n"     \
+       "       .section .fixup,\"ax\"          \n"     \
+       "       .align  4                       \n"     \
+       "4:     mov %0, %4                      \n"     \
+       "       b   3b                          \n"     \
+       "       .previous                       \n"     \
+       "       .section __ex_table,\"a\"       \n"     \
+       "       .align  4                       \n"     \
+       "       .word   1b, 4b                  \n"     \
+       "       .word   2b, 4b                  \n"     \
+       "       .previous                       \n"     \
+                                                       \
+       : "=&r" (ret), "=&r" (oldval)                   \
+       : "r" (uaddr), "r" (oparg), "ir" (-EFAULT)      \
+       : "cc", "memory")
+
+#else  /* !CONFIG_ARC_HAS_LLSC */
+
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
                                                        \
        __asm__ __volatile__(                           \
-       "1:     ld  %1, [%2]                    \n"     \
+       "1:     ld      %1, [%2]                \n"     \
                insn                            "\n"    \
-       "2:     st  %0, [%2]                    \n"     \
+       "2:     st      %0, [%2]                \n"     \
        "       mov %0, 0                       \n"     \
        "3:                                     \n"     \
        "       .section .fixup,\"ax\"          \n"     \
@@ -39,6 +67,8 @@
        : "r" (uaddr), "r" (oparg), "ir" (-EFAULT)      \
        : "cc", "memory")
 
+#endif
+
 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
        int op = (encoded_op >> 28) & 7;
@@ -123,11 +153,17 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
 
        pagefault_disable();
 
-       /* TBD : can use llock/scond */
        __asm__ __volatile__(
-       "1:     ld    %0, [%3]  \n"
-       "       brne  %0, %1, 3f        \n"
-       "2:     st    %2, [%3]  \n"
+#ifdef CONFIG_ARC_HAS_LLSC
+       "1:     llock   %0, [%3]                \n"
+       "       brne    %0, %1, 3f              \n"
+       "2:     scond   %2, [%3]                \n"
+       "       bnz     1b                      \n"
+#else
+       "1:     ld      %0, [%3]                \n"
+       "       brne    %0, %1, 3f              \n"
+       "2:     st      %2, [%3]                \n"
+#endif
        "3:     \n"
        "       .section .fixup,\"ax\"  \n"
        "4:     mov %0, %4      \n"
diff --git a/arch/arc/include/asm/mm-arch-hooks.h b/arch/arc/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index c37541c..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_ARC_MM_ARCH_HOOKS_H
-#define _ASM_ARC_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_ARC_MM_ARCH_HOOKS_H */
index 91755972b9a25222c37a36e6b76dfd758cdbe771..91694ec1ce959498fd5b4431962b03bbdf4119b7 100644 (file)
@@ -106,7 +106,7 @@ struct callee_regs {
        long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
 };
 
-#define instruction_pointer(regs)      ((regs)->ret)
+#define instruction_pointer(regs)      (unsigned long)((regs)->ret)
 #define profile_pc(regs)               instruction_pointer(regs)
 
 /* return 1 if user mode or 0 if kernel mode */
index 6208c630abed23a4fe2fd1306d5b31c6ec1d172e..26c15682747960d3b5b1c412f76dff3c2ebc7780 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/of.h>
 #include <linux/irqdomain.h>
 #include <linux/irqchip.h>
-#include "../../drivers/irqchip/irqchip.h"
 #include <asm/irq.h>
 
 /*
index fcdddb631766eab0c9a2d3dd3d9b3456c90dfdb4..039fac30b5c1f2fca837c6f9cc11de0e0c56c35d 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/of.h>
 #include <linux/irqdomain.h>
 #include <linux/irqchip.h>
-#include "../../drivers/irqchip/irqchip.h"
 #include <asm/irq.h>
 
 /*
index 30284e8de6ffc2af0844468798a5c1002465eb56..2fb86589054de6c0b051325463ebd03236c70299 100644 (file)
@@ -175,7 +175,6 @@ void mcip_init_early_smp(void)
 #include <linux/irqchip.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
-#include "../../drivers/irqchip/irqchip.h"
 
 /*
  * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
@@ -218,11 +217,28 @@ static void idu_irq_unmask(struct irq_data *data)
        raw_spin_unlock_irqrestore(&mcip_lock, flags);
 }
 
+#ifdef CONFIG_SMP
 static int
-idu_irq_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool f)
+idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
+                    bool force)
 {
+       unsigned long flags;
+       cpumask_t online;
+
+       /* error out if @cpumask contains no online CPU */
+       if (!cpumask_and(&online, cpumask, cpu_online_mask))
+               return -EINVAL;
+
+       raw_spin_lock_irqsave(&mcip_lock, flags);
+
+       idu_set_dest(data->hwirq, cpumask_bits(&online)[0]);
+       idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
+
+       raw_spin_unlock_irqrestore(&mcip_lock, flags);
+
        return IRQ_SET_MASK_OK;
 }
+#endif
 
 static struct irq_chip idu_irq_chip = {
        .name                   = "MCIP IDU Intc",
@@ -330,8 +346,7 @@ idu_of_init(struct device_node *intc, struct device_node *parent)
                if (!i)
                        idu_first_irq = irq;
 
-               irq_set_handler_data(irq, domain);
-               irq_set_chained_handler(irq, idu_cascade_isr);
+               irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
        }
 
        __mcip_cmd(CMD_IDU_ENABLE, 0);
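
Two details in this hunk carry the reasoning. The new affinity callback follows the genirq contract: intersect the requested mask with cpu_online_mask, fail with -EINVAL when nothing online remains, program the hardware under the same lock that serializes the other MCIP aux-register sequences, and return IRQ_SET_MASK_OK. And irq_set_chained_handler_and_data() installs the cascade handler and its data in one step, avoiding the window in the replaced two-call sequence where the handler could fire before its data was set. A sketch of the callback shape, assuming kernel context (the register writes are elided):

static int affinity_sketch(struct irq_data *data,
                           const struct cpumask *cpumask, bool force)
{
        cpumask_t online;

        /* Reject a request that names no online CPU */
        if (!cpumask_and(&online, cpumask, cpu_online_mask))
                return -EINVAL;

        /* ... lock, write destination and trigger mode, unlock ... */

        return IRQ_SET_MASK_OK;
}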
index a3d186211ed367bcf852718682a62f24b555f3de..18cc01591c96e64186a8b13c1aef5b8011091b12 100644 (file)
@@ -142,17 +142,22 @@ static void read_arc_build_cfg_regs(void)
 }
 
 static const struct cpuinfo_data arc_cpu_tbl[] = {
+#ifdef CONFIG_ISA_ARCOMPACT
        { {0x20, "ARC 600"      }, 0x2F},
        { {0x30, "ARC 700"      }, 0x33},
        { {0x34, "ARC 700 R4.10"}, 0x34},
        { {0x35, "ARC 700 R4.11"}, 0x35},
-       { {0x50, "ARC HS38"     }, 0x51},
+#else
+       { {0x50, "ARC HS38 R2.0"}, 0x51},
+       { {0x52, "ARC HS38 R2.1"}, 0x52},
+#endif
        { {0x00, NULL           } }
 };
 
-#define IS_AVAIL1(v, str)      ((v) ? str : "")
-#define IS_USED(cfg)           (IS_ENABLED(cfg) ? "" : "(not used) ")
-#define IS_AVAIL2(v, str, cfg)  IS_AVAIL1(v, str), IS_AVAIL1(v, IS_USED(cfg))
+#define IS_AVAIL1(v, s)                ((v) ? s : "")
+#define IS_USED_RUN(v)         ((v) ? "" : "(not used) ")
+#define IS_USED_CFG(cfg)       IS_USED_RUN(IS_ENABLED(cfg))
+#define IS_AVAIL2(v, s, cfg)   IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_CFG(cfg))
 
 static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 {
@@ -226,7 +231,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
                        n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt);
                }
                n += scnprintf(buf + n, len - n, "%s",
-                              IS_USED(CONFIG_ARC_HAS_HW_MPY));
+                              IS_USED_CFG(CONFIG_ARC_HAS_HW_MPY));
        }
 
        n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
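
The renamed macros keep the trick that IS_AVAIL2() expands to two printf arguments: the feature string itself, and either "" or "(not used) " depending on whether the matching Kconfig option is enabled. A standalone demonstration, with IS_ENABLED() modeled by a plain 0/1 flag:

#include <stdio.h>

#define IS_AVAIL1(v, s)         ((v) ? s : "")
#define IS_USED_RUN(v)          ((v) ? "" : "(not used) ")
/* The kernel passes IS_ENABLED(cfg) here; a plain flag stands in for it */
#define IS_AVAIL2(v, s, en)     IS_AVAIL1(v, s), IS_AVAIL1(v, IS_USED_RUN(en))

int main(void)
{
        /* Hardware has the feature but the config left it off:
         * prints "llock/scond (not used) " */
        printf("%s%s\n", IS_AVAIL2(1, "llock/scond ", 0));

        /* Hardware lacks the feature entirely: prints an empty line */
        printf("%s%s\n", IS_AVAIL2(0, "llock/scond ", 1));
        return 0;
}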
index 807f7d61d7a7cf867bca011251729d1164bd3f33..a6f91e88ce36e3ea2a2c95d8eabdceffa48be7fc 100644 (file)
@@ -58,7 +58,6 @@ static void show_callee_regs(struct callee_regs *cregs)
 
 static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
 {
-       struct path path;
        char *path_nm = NULL;
        struct mm_struct *mm;
        struct file *exe_file;
index b29d62ed4f7ece64acc41287ded40e3cfb3e186b..1cd6695b6ab50cbbb870ffa969027a50103dba29 100644 (file)
@@ -468,10 +468,18 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
 noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
 {
 #ifdef CONFIG_ISA_ARCV2
+       /*
+        * SLC is shared between all cores and concurrent aux operations from
+        * multiple cores need to be serialized using a spinlock.
+        * Without it, a concurrent operation can be silently ignored and/or
+        * the old/new operation can remain incomplete forever (lockup in the
+        * SLC_CTRL_BUSY loop below).
+        */
+       static DEFINE_SPINLOCK(lock);
        unsigned long flags;
        unsigned int ctrl;
 
-       local_irq_save(flags);
+       spin_lock_irqsave(&lock, flags);
 
        /*
         * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
@@ -504,7 +512,7 @@ noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
 
        while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
 
-       local_irq_restore(flags);
+       spin_unlock_irqrestore(&lock, flags);
 #endif
 }
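
Swapping local_irq_save() for a spinlock is the key fix here: disabling interrupts only excludes code on the local CPU, while the SLC aux registers are a single resource shared by every core. A minimal sketch of the pattern, assuming kernel context:

static void shared_aux_op_sketch(unsigned long paddr, unsigned long sz)
{
        /* One lock for all cores; function-local static keeps it private */
        static DEFINE_SPINLOCK(lock);
        unsigned long flags;

        spin_lock_irqsave(&lock, flags);
        /* ... multi-register setup, kick off the operation ... */
        /* ... poll the busy bit until the hardware completes ... */
        spin_unlock_irqrestore(&lock, flags);
}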
 
index 74a637a1cfc48b2c5d4f0047a0b25bc51d238e16..57706a9c69489791df22419ec069c1970d1aa634 100644 (file)
@@ -60,8 +60,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 
        /* This is kernel Virtual address (0x7000_0000 based) */
        kvaddr = ioremap_nocache((unsigned long)paddr, size);
-       if (kvaddr != NULL)
-               memset(kvaddr, 0, size);
+       if (kvaddr == NULL)
+               return NULL;
 
        /* This is bus address, platform dependent */
        *dma_handle = (dma_addr_t)paddr;
index 0d35ab64641c4a4c906dbf471a4e2d06b51651fe..7106114c74647cd2e76499a6f5654ddf5f98f73d 100644 (file)
@@ -74,6 +74,7 @@
        audio_codec: tlv320aic3106@1b {
                compatible = "ti,tlv320aic3106";
                reg = <0x1b>;
+               ai3x-micbias-vg = <0x2>;
        };
 
        accel: lis331dlh@1d {
        ti,audio-routing =
                "Headphone Jack",       "HPLOUT",
                "Headphone Jack",       "HPROUT",
-               "LINE1L",               "Line In";
+               "MIC3L",                "Mic3L Switch";
 };
 
 &mcasp0 {
        regulators {
                dcdc1_reg: regulator@0 {
                        /* VDD_1V8 system supply */
+                       regulator-always-on;
                };
 
                dcdc2_reg: regulator@1 {
                        /* VDD_CORE voltage limits 0.95V - 1.26V with +/-4% tolerance */
                        regulator-name = "vdd_core";
                        regulator-min-microvolt = <925000>;
-                       regulator-max-microvolt = <1325000>;
+                       regulator-max-microvolt = <1150000>;
                        regulator-boot-on;
+                       regulator-always-on;
                };
 
                dcdc3_reg: regulator@2 {
                        /* VDD_MPU voltage limits 0.95V - 1.1V with +/-4% tolerance */
                        regulator-name = "vdd_mpu";
                        regulator-min-microvolt = <925000>;
-                       regulator-max-microvolt = <1150000>;
+                       regulator-max-microvolt = <1325000>;
                        regulator-boot-on;
+                       regulator-always-on;
                };
 
                ldo1_reg: regulator@3 {
                        /* VRTC 1.8V always-on supply */
+                       regulator-name = "vrtc,vdds";
                        regulator-always-on;
                };
 
                ldo2_reg: regulator@4 {
                        /* 3.3V rail */
+                       regulator-name = "vdd_3v3aux";
+                       regulator-always-on;
                };
 
                ldo3_reg: regulator@5 {
                        /* VDD_3V3A 3.3V rail */
+                       regulator-name = "vdd_3v3a";
                        regulator-min-microvolt = <3300000>;
                        regulator-max-microvolt = <3300000>;
                };
 
                ldo4_reg: regulator@6 {
                        /* VDD_3V3B 3.3V rail */
+                       regulator-name = "vdd_3v3b";
+                       regulator-always-on;
                };
        };
 };
index 9c7fb0acae79c9d3c34f58e1acca360057d0c43e..4e42f30cb318df29a1fad4c9ef8752c769cad137 100644 (file)
@@ -22,6 +22,7 @@
                        MATRIX_KEY(0x00, 0x02, KEY_F1)
                        MATRIX_KEY(0x00, 0x03, KEY_B)
                        MATRIX_KEY(0x00, 0x04, KEY_F10)
+                       MATRIX_KEY(0x00, 0x05, KEY_RO)
                        MATRIX_KEY(0x00, 0x06, KEY_N)
                        MATRIX_KEY(0x00, 0x08, KEY_EQUAL)
                        MATRIX_KEY(0x00, 0x0a, KEY_RIGHTALT)
@@ -34,6 +35,7 @@
                        MATRIX_KEY(0x01, 0x08, KEY_APOSTROPHE)
                        MATRIX_KEY(0x01, 0x09, KEY_F9)
                        MATRIX_KEY(0x01, 0x0b, KEY_BACKSPACE)
+                       MATRIX_KEY(0x01, 0x0c, KEY_HENKAN)
 
                        MATRIX_KEY(0x02, 0x00, KEY_LEFTCTRL)
                        MATRIX_KEY(0x02, 0x01, KEY_TAB)
@@ -45,6 +47,7 @@
                        MATRIX_KEY(0x02, 0x07, KEY_102ND)
                        MATRIX_KEY(0x02, 0x08, KEY_LEFTBRACE)
                        MATRIX_KEY(0x02, 0x09, KEY_F8)
+                       MATRIX_KEY(0x02, 0x0a, KEY_YEN)
 
                        MATRIX_KEY(0x03, 0x01, KEY_GRAVE)
                        MATRIX_KEY(0x03, 0x02, KEY_F2)
@@ -53,6 +56,7 @@
                        MATRIX_KEY(0x03, 0x06, KEY_6)
                        MATRIX_KEY(0x03, 0x08, KEY_MINUS)
                        MATRIX_KEY(0x03, 0x0b, KEY_BACKSLASH)
+                       MATRIX_KEY(0x03, 0x0c, KEY_MUHENKAN)
 
                        MATRIX_KEY(0x04, 0x00, KEY_RIGHTCTRL)
                        MATRIX_KEY(0x04, 0x01, KEY_A)
index c892d58e8dad38252dfcdd13c5f758cdc05aca36..b995333ea22b1315cd3a322e7d840b9a9c653c36 100644 (file)
                                interrupts = <36 37 38 39 40 41 42 43 44>;
                                status = "disabled";
                                clocks = <&clks 26>;
+                               #io-channel-cells = <1>;
                        };
 
                        spdif@80054000 {
index dd45e6971bc35061a3b9d5b4579c6e6697ca9eca..9351296356dcc419ccb2746efa031df01bcb6462 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/input/input.h>
 #include "imx25.dtsi"
 
 &esdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc1>;
-       cd-gpios = <&gpio2 1 0>;
-       wp-gpios = <&gpio2 0 0>;
+       cd-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
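
This hunk and the many similar ones below replace a bare third GPIO cell of 0, which silently meant active-high, with the named GPIO_ACTIVE_LOW/GPIO_ACTIVE_HIGH constants that match how each board actually wires its card-detect and write-protect lines. A sketch of how a consuming driver observes the flag, assuming kernel context (the function name is illustrative; "cd-gpios" matches the property above):

#include <linux/of_gpio.h>

static bool cd_is_active_low_sketch(struct device_node *np)
{
        enum of_gpio_flags flags;
        int gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);

        if (!gpio_is_valid(gpio))
                return false;

        /* Set when the DT cell carries GPIO_ACTIVE_LOW */
        return flags & OF_GPIO_ACTIVE_LOW;
}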
 
index bc215e4b75fd52c6e5b2e271b4a9e6265d442205..b69be5c499cfc69f8df1902fffd3d0d73eb47280 100644 (file)
                        };
 
                        gpt1: timer@10003000 {
-                               compatible = "fsl,imx27-gpt", "fsl,imx1-gpt";
+                               compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
                                reg = <0x10003000 0x1000>;
                                interrupts = <26>;
                                clocks = <&clks IMX27_CLK_GPT1_IPG_GATE>,
                        };
 
                        gpt2: timer@10004000 {
-                               compatible = "fsl,imx27-gpt", "fsl,imx1-gpt";
+                               compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
                                reg = <0x10004000 0x1000>;
                                interrupts = <25>;
                                clocks = <&clks IMX27_CLK_GPT2_IPG_GATE>,
                        };
 
                        gpt3: timer@10005000 {
-                               compatible = "fsl,imx27-gpt", "fsl,imx1-gpt";
+                               compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
                                reg = <0x10005000 0x1000>;
                                interrupts = <24>;
                                clocks = <&clks IMX27_CLK_GPT3_IPG_GATE>,
                        };
 
                        gpt4: timer@10019000 {
-                               compatible = "fsl,imx27-gpt", "fsl,imx1-gpt";
+                               compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
                                reg = <0x10019000 0x1000>;
                                interrupts = <4>;
                                clocks = <&clks IMX27_CLK_GPT4_IPG_GATE>,
                        };
 
                        gpt5: timer@1001a000 {
-                               compatible = "fsl,imx27-gpt", "fsl,imx1-gpt";
+                               compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
                                reg = <0x1001a000 0x1000>;
                                interrupts = <3>;
                                clocks = <&clks IMX27_CLK_GPT5_IPG_GATE>,
                        };
 
                        gpt6: timer@1001f000 {
-                               compatible = "fsl,imx27-gpt", "fsl,imx1-gpt";
+                               compatible = "fsl,imx27-gpt", "fsl,imx21-gpt";
                                reg = <0x1001f000 0x1000>;
                                interrupts = <2>;
                                clocks = <&clks IMX27_CLK_GPT6_IPG_GATE>,
index 93d3ea12328c50c07cf9d7ab4d09fc952dc5a397..0f3fe29b816ebad2c77d95271e76e089437109b2 100644 (file)
@@ -98,7 +98,7 @@
 &esdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc1>;
-       cd-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio2 29 GPIO_ACTIVE_LOW>;
        bus-width = <4>;
        status = "okay";
 };
index e9337ad52f59b2159a9419b7d177f2cf6baccfe3..3bc18835fb4bbbe0e388922689a70395bb532cd6 100644 (file)
 &esdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc1>;
-       cd-gpios = <&gpio1 1 0>;
-       wp-gpios = <&gpio1 9 0>;
+       cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
 
index d0e0f57eb432e9822b005e986c105fc7949e028b..53f40885c530637c1776dc6172764cf6669f918d 100644 (file)
 &esdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc1>;
-       cd-gpios = <&gpio1 1 0>;
-       wp-gpios = <&gpio1 9 0>;
+       cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
 
index 181ae5ebf23f64396c8a0b24aa01ef7121c73c87..b0d5542ac829a53b1168c8d8501f5e1fcbd8d167 100644 (file)
 &esdhc3 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc3>;
-       cd-gpios = <&gpio3 11 0>;
-       wp-gpios = <&gpio3 12 0>;
+       cd-gpios = <&gpio3 11 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio3 12 GPIO_ACTIVE_HIGH>;
        bus-width = <8>;
        status = "okay";
 };
 &tve {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_vga_sync>;
+       ddc-i2c-bus = <&i2c2>;
        fsl,tve-mode = "vga";
-       fsl,hsync-pin = <4>;
-       fsl,vsync-pin = <6>;
+       fsl,hsync-pin = <7>;    /* IPU DI1 PIN7 via EIM_OE */
+       fsl,vsync-pin = <8>;    /* IPU DI1 PIN8 via EIM_RW */
        status = "okay";
 };
 
index 1d325576bcc04d4a119d96f7c85612f2f4bd62a2..fc89ce1e5763a2b03d5da06b38e6e3896e56dd20 100644 (file)
@@ -41,8 +41,8 @@
 &esdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc1>;
-       cd-gpios = <&gpio3 13 0>;
-       wp-gpios = <&gpio4 11 0>;
+       cd-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio4 11 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
 
index 4f1f0e2868bf12816ec93f545a8f592e43e7f64d..e03373a58760ff79c431b40fabbb8b54e1d1039e 100644 (file)
@@ -41,8 +41,8 @@
        pinctrl-0 = <&pinctrl_esdhc2>,
                    <&pinctrl_esdhc2_cdwp>;
        vmmc-supply = <&reg_3p3v>;
-       wp-gpios = <&gpio1 2 0>;
-       cd-gpios = <&gpio1 4 0>;
+       wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
        status = "disabled";
 };
 
index 704bd72cbfec823da4145ead1fd6e7dc719d094b..d3e50b22064f28777bbd2a3b60f512d844ff9418 100644 (file)
 };
 
 &esdhc1 {
-       cd-gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
        fsl,wp-controller;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc1>;
 };
 
 &esdhc2 {
-       cd-gpios = <&gpio3 25 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
        fsl,wp-controller;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc2>;
index c17d3ad6dba50213c18a9076ca15c261d6740e77..fc51b87ad2087022e8b8648c71fac18e00abb762 100644 (file)
 &esdhc2 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc2>;
-       cd-gpios = <&gpio3 25 0>;
-       wp-gpios = <&gpio2 19 0>;
+       cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio2 19 GPIO_ACTIVE_HIGH>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
index 43cb3fd76be764cdceb08efd949f47866ebe03e9..5111f5170d5343398bab4ec60e548de07f26200b 100644 (file)
 &usdhc2 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc2>;
-       cd-gpios = <&gpio1 4 0>;
-       wp-gpios = <&gpio1 2 0>;
+       cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
 &usdhc3 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3>;
-       cd-gpios = <&gpio7 0 0>;
-       wp-gpios = <&gpio7 1 0>;
+       cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
index 78df05e9d1ce61ca7d71c0825367cd8cd3757925..d6515f7a56c427bc7a8490f932b78320f942a624 100644 (file)
@@ -11,6 +11,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
 #include "imx6q.dtsi"
 
 / {
 };
 
 &usdhc3 {
-       cd-gpios = <&gpio6 11 0>;
-       wp-gpios = <&gpio6 14 0>;
+       cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio6 14 GPIO_ACTIVE_HIGH>;
        vmmc-supply = <&reg_3p3v>;
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3
index 703539cf36d3078fa7b179fad58c26f9a72b0069..00bd63e63d0cdd47f4ffa012f572f10323a2d14f 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
 #include "imx6q.dtsi"
 
 / {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3>;
        bus-width = <4>;
-       cd-gpios = <&gpio6 11 0>;
+       cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
index a43abfa21e33b9b596dbab2e82b8e121baebdd00..5645d52850a7eca0f1905f7341935ae82ce99859 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc2>;
        bus-width = <4>;
-       cd-gpios = <&gpio2 2 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3>;
        bus-width = <4>;
-       cd-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
index e6d9195a1da7bfb98260f0d2ca9bbd0f57de9374..f4d6ae564ead290cd9fc1e02d519573364c47af5 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc1>;
        vmmc-supply = <&reg_3p3v>;
-       cd-gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
 
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc2>;
        vmmc-supply = <&reg_3p3v>;
-       cd-gpios = <&gpio4 8 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio4 8 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
 
index 1d85de2befb3ef9c8e1257d33a34721e502e1755..a47a0399a1728da0c1293a8312f30bef082fd349 100644 (file)
 &usdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc1>;
-       cd-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
        no-1-8-v;
        status = "okay";
 };
 &usdhc2 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc2>;
-       cd-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio4 5 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
        no-1-8-v;
        status = "okay";
index 59e5d15e3ec4bad9cc664fe0f985502cf39a1375..ff41f83551de6ee72bb6da96b24c3e3ed87d4c58 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>;
        vmmc-supply = <&reg_3p3v>;
-       cd-gpios = <&gpio1 4 0>;
+       cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
index 2c253d6d20bd1fec6b71d18347f27ef0126e3f5e..45e7c39e80d584c73ade1523a3e4316e549c76d4 100644 (file)
@@ -1,3 +1,5 @@
+#include <dt-bindings/gpio/gpio.h>
+
 / {
        regulators {
                compatible = "simple-bus";
 &usdhc2 { /* module slot */
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc2>;
-       cd-gpios = <&gpio2 2 0>;
+       cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
 
index b5756c21ea1d55b791b10e6728b573a918ad78e7..4493f6e993301da96edefaac1334372cd18cf4c7 100644 (file)
 &usdhc3 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3>;
-       cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
index 86f03c1b147c630c43166aa9d4782da28f333519..a857d1294609a0a0670a7d3f92e446e92185b743 100644 (file)
 &usdhc3 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3>;
-       cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
index 4a8d97f477592316c3d9ff86cee4bc1b5c9e887f..1afe3385e2d283b7a9a2da84e37ca595ce04c13f 100644 (file)
 &usdhc3 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3>;
-       cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
index 62a82f3eba888f7d16f11e8dc8cac129ae4c2073..6dd0b764e036d1c51cdde3015c1a2cc56178a999 100644 (file)
                &pinctrl_hummingboard_usdhc2
        >;
        vmmc-supply = <&reg_3p3v>;
-       cd-gpios = <&gpio1 4 0>;
+       cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
index 3af16dfe417be4bb6ec89a2f870e05f1b2df3214..d7fe6672d00cf38141aca0ea88cd807cff4b3cc7 100644 (file)
 &usdhc3 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3>;
-       cd-gpios = <&gpio7 0 0>;
+       cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
 &usdhc4 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc4>;
-       cd-gpios = <&gpio2 6 0>;
+       cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
index 1ce6133b67f5c65fefd2fe85d368ac455b199950..9e6ecd99b472dbcb5ce707048fc4fb4775a25a77 100644 (file)
 &usdhc2 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc2>;
-       cd-gpios = <&gpio1 4 0>;
-       wp-gpios = <&gpio1 2 0>;
+       cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
        status = "disabled";
 };
 
         pinctrl-names = "default";
         pinctrl-0 = <&pinctrl_usdhc3
                     &pinctrl_usdhc3_cdwp>;
-        cd-gpios = <&gpio1 27 0>;
-        wp-gpios = <&gpio1 29 0>;
+       cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
         status = "disabled";
 };
index 488a640796ac05fa50c4299185fbe71c64ef1a13..3373fd958e95c72b098ed14ea2a3228ba7903ea6 100644 (file)
        pinctrl-0 = <&pinctrl_usdhc2>;
        bus-width = <4>;
        cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
-       wp-gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
 
        pinctrl-0 = <&pinctrl_usdhc3>;
        bus-width = <4>;
        cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
-       wp-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
index 3b24b12651b2b86ee1a74d5baccca435778ddd3e..e329ca5c3322716e14a9117d7d8e1ca956c89f0b 100644 (file)
        pinctrl-0 = <&pinctrl_usdhc3>;
        pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
        pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
-       cd-gpios = <&gpio6 15 0>;
-       wp-gpios = <&gpio1 13 0>;
+       cd-gpios = <&gpio6 15 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
 
index e00c44f6a0df888f6ecb8935ddc99b85e932ee43..782379320517735f7beb62814e0ce1944ecebb18 100644 (file)
 &usdhc3 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3>;
-       cd-gpios = <&gpio7 0 0>;
-       wp-gpios = <&gpio7 1 0>;
+       cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
 &usdhc4 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc4>;
-       cd-gpios = <&gpio2 6 0>;
+       cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
        vmmc-supply = <&reg_3p3v>;
        status = "okay";
 };
index a626e6dd8022c04defdbc56171147c04027aa48b..944eb81cb2b8c03aa663a1ee7f2bcc6ca664af52 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc2>;
        bus-width = <8>;
-       cd-gpios = <&gpio2 2 0>;
-       wp-gpios = <&gpio2 3 0>;
+       cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
 
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3>;
        bus-width = <8>;
-       cd-gpios = <&gpio2 0 0>;
-       wp-gpios = <&gpio2 1 0>;
+       cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
 
index f02b80b41d4fb94d9a5f690d9ddc6d89371e32f6..da08de324e9eb595db45c7cae308327d77bd33a9 100644 (file)
        pinctrl-0 = <&pinctrl_usdhc1>;
        bus-width = <4>;
        no-1-8-v;
-       cd-gpios = <&gpio7 2 0>;
+       cd-gpios = <&gpio7 2 GPIO_ACTIVE_LOW>;
        fsl,wp-controller;
        status = "okay";
 };
        pinctrl-0 = <&pinctrl_usdhc2>;
        bus-width = <4>;
        no-1-8-v;
-       cd-gpios = <&gpio7 3 0>;
+       cd-gpios = <&gpio7 3 GPIO_ACTIVE_LOW>;
        fsl,wp-controller;
        status = "okay";
 };
index 5fb091675582e25b84026f5447d0be604f9266ac..9e096d811bedac74d09c6ebf584665ff1ac6fead 100644 (file)
@@ -9,6 +9,8 @@
  *
  */
 
+#include <dt-bindings/gpio/gpio.h>
+
 / {
        regulators {
                compatible = "simple-bus";
 &usdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc1>;
-       cd-gpios = <&gpio1 2 0>;
+       cd-gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
 
 &usdhc3 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc3>;
-       cd-gpios = <&gpio3 9 0>;
+       cd-gpios = <&gpio3 9 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
index 945887d3fdb35a6588155590474479901a45671f..b84dff2e94ea1e4e44c15a054d90e67f9d06bde6 100644 (file)
        pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
        pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
        bus-width = <8>;
-       cd-gpios = <&gpio4 7 0>;
-       wp-gpios = <&gpio4 6 0>;
+       cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
 
        pinctrl-0 = <&pinctrl_usdhc2>;
        pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
        pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
-       cd-gpios = <&gpio5 0 0>;
-       wp-gpios = <&gpio4 29 0>;
+       cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio4 29 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
 
        pinctrl-0 = <&pinctrl_usdhc3>;
        pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
        pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
-       cd-gpios = <&gpio3 22 0>;
+       cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
index e3c0b63c22056764f93dbecfc2b9500770a6343c..115f3fd78971868ad7025fbb0f46c17c4252e574 100644 (file)
@@ -49,7 +49,7 @@
        pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
        pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
        bus-width = <8>;
-       cd-gpios = <&gpio7 10 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
        keep-power-in-suspend;
        enable-sdio-wakeup;
@@ -61,7 +61,7 @@
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc4>;
        bus-width = <8>;
-       cd-gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio7 11 GPIO_ACTIVE_LOW>;
        no-1-8-v;
        keep-power-in-suspend;
        enable-sdio-wakeup;
index cef04cef3a807f44efdc6a44fb754e0eb4b40b1e..ac88c3467078ec92971324395101b7db785da005 100644 (file)
        pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
        pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
        bus-width = <8>;
-       cd-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
        keep-power-in-suspend;
        enable-sdio-wakeup;
 &usdhc4 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc4>;
-       cd-gpios = <&gpio6 21 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio6 21 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio6 20 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
index 4d1a4b977d8492c62085f87ecff7ad29704eb4ad..fdd1d7c9a5cc2608ac047f088f62e394d88db124 100644 (file)
 &usdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_usdhc1>;
-       cd-gpios = <&gpio5 0 0>;
-       wp-gpios = <&gpio5 1 0>;
+       cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
        enable-sdio-wakeup;
        keep-power-in-suspend;
        status = "okay";
index 50e555eab50d98e971014e57ed8611df5a07a007..1b6494fbdb91b9301c607652efbd9a9fb34a9f36 100644 (file)
@@ -86,7 +86,7 @@
                        gpio,syscon-dev = <&devctrl 0x240>;
                };
 
-               pcie@21020000 {
+               pcie1: pcie@21020000 {
                        compatible = "ti,keystone-pcie","snps,dw-pcie";
                        clocks = <&clkpcie1>;
                        clock-names = "pcie";
@@ -96,6 +96,7 @@
                        ranges = <0x81000000 0 0 0x23260000 0x4000 0x4000
                                0x82000000 0 0x60000000 0x60000000 0 0x10000000>;
 
+                       status = "disabled";
                        device_type = "pci";
                        num-lanes = <2>;
 
index c06542b2c954452dcf891f579014478e573b8221..e7a6f6deabb6c0d89d4ca1e2c2ae63639249d010 100644 (file)
                        ti,syscon-dev = <&devctrl 0x2a0>;
                };
 
-               pcie@21800000 {
+               pcie0: pcie@21800000 {
                        compatible = "ti,keystone-pcie", "snps,dw-pcie";
                        clocks = <&clkpcie>;
                        clock-names = "pcie";
                        ranges = <0x81000000 0 0 0x23250000 0 0x4000
                                0x82000000 0 0x50000000 0x50000000 0 0x10000000>;
 
+                       status = "disabled";
                        device_type = "pci";
                        num-lanes = <2>;
 
index 233c69e50ae3ba1fc68f6f9f5e17a889c3c434ad..df8908adb0cbf2e0c1784559c181345892d36a85 100644 (file)
 
        lcd0: display@0 {
                compatible = "lgphilips,lb035q02";
-               label = "lcd";
+               label = "lcd35";
 
                reg = <1>;                                      /* CS1 */
                spi-max-frequency = <10000000>;
index f5395b7da912fc64ca79ae0683dabe5a309bcfcf..048fd216970a9acf4627a8edd1f409b61180e2ac 100644 (file)
@@ -98,7 +98,7 @@
 
        lcd0: display@0 {
                compatible = "samsung,lte430wq-f0c", "panel-dpi";
-               label = "lcd";
+               label = "lcd43";
 
                pinctrl-names = "default";
                pinctrl-0 = <&lte430_pins>;
index f884d6adb71e8ed28371afd54c04528403ad29ba..7d31c6ff246f47b14afd5eeb332d01a955faef35 100644 (file)
                        reg = <0x4a066000 0x100>;
                        interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
                        ti,hwmods = "mmu_dsp";
+                       #iommu-cells = <0>;
                };
 
                mmu_ipu: mmu@55082000 {
                        reg = <0x55082000 0x100>;
                        interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
                        ti,hwmods = "mmu_ipu";
+                       #iommu-cells = <0>;
                        ti,iommu-bus-err-back;
                };
 
index 7d24ae0306b56845c80c2c783dd818033a3a6cd1..c8fd648a7108515def0e9492936fd3760f156579 100644 (file)
                        reg = <0x4a066000 0x100>;
                        interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
                        ti,hwmods = "mmu_dsp";
+                       #iommu-cells = <0>;
                };
 
                mmu_ipu: mmu@55082000 {
                        reg = <0x55082000 0x100>;
                        interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
                        ti,hwmods = "mmu_ipu";
+                       #iommu-cells = <0>;
                        ti,iommu-bus-err-back;
                };
 
index 71468a7eb28f146b3c96c06b955ebb3444a8d033..5e17fd14772884c14dffdffa8d14b9e158d9800d 100644 (file)
        rxc-skew-ps = <2000>;
 };
 
-&mmc0 {
-       vmmc-supply = <&regulator_3_3v>;
-       vqmmc-supply = <&regulator_3_3v>;
-};
-
-&usb1 {
-       status = "okay";
-};
-
 &gpio2 {
        status = "okay";
 };
 
-&i2c1{
+&i2c1 {
        status = "okay";
 
-       accel1: accel1@53{
-               compatible = "adxl34x";
+       accel1: accelerometer@53 {
+               compatible = "adi,adxl345";
                reg = <0x53>;
 
-               interrupt-parent = < &portc >;
+               interrupt-parent = <&portc>;
                interrupts = <3 2>;
        };
 };
+
+&mmc0 {
+       vmmc-supply = <&regulator_3_3v>;
+       vqmmc-supply = <&regulator_3_3v>;
+};
+
+&usb1 {
+       status = "okay";
+};
index d42c84b1df8d02aa4199ac01e68ee547458576d1..e48857249ce7bfc8a9440fa25d80c97460436697 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr1310 Evaluation Board
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 9d342920695a5f4e465b04332cebddc9ff73c347..54bc6d3cf2903e8d710aaa0ea178232d3be09942 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for all SPEAr1310 SoCs
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index b23e05ed1d60226d36f3649b3e1a12ee2928c4bd..c611f5606dfea5e2ba37ab5b0b57b7b73e0eed37 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr1340 Evaluation Board
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 13e1aa33daa2e379bde1b3cbc2882b1816ba3a39..df2232d767ed2ea7ffca8383e258cecb768dc679 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for all SPEAr1340 SoCs
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 40accc87e3a252580c1008fa041356a40093620e..14594ce8c18a5a889bbb0fb1fd0c02d7161638c3 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for all SPEAr13xx SoCs
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 5de1431653e4c5a9f4152ce8323b0c111dd80b65..e859e8288bcde7681dad671ec78c7c288c54a731 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr300 Evaluation Board
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index f79b3dfaabe6c2cc3f984148dde816e1674bc803..f4e92e599729097f195dc6dd368c6628f3196b9c 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr300 SoC
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index b09632963d158dfa1f505fc8b0325b5aaa64f42e..070f2c1b78511399c458e2726b6cffb10482c476 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr310 Evaluation Board
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 95372080eea60b38f3397cebf52bfb5364900e1c..da210b4547533d1127649233839ad117865bcad2 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr310 SoC
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index fdedbb514102b2920a2ea45edcbfa860b28b346e..1b1034477923cfd469b718f83b87e539188eff48 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr320 Evaluation Board
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index ffea342aeec99575612c38224f05aeaa70594640..22be6e5edaacd845596d2ccd6889a6a3e0944ecd 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr320 SoC
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index f0e3fcf8e3237e2a63cc7e83579cb5f785efbec5..118135d7589908b424db93f8981587f37c966a72 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DTS file for all SPEAr3xx SoCs
  *
- * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
+ * Copyright 2012 Viresh Kumar <vireshk@kernel.org>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
index 32dd55e5f4e6b8567a8a2d207b69311959c9f9a1..6eaaf638e52e217dacf0769cf5c22251754ffc60 100644 (file)
        model = "ST-Ericsson U8540 platform with Device Tree";
        compatible = "st-ericsson,ccu8540", "st-ericsson,u8540";
 
+       /* This stabilizes the serial port enumeration */
+       aliases {
+               serial0 = &ux500_serial0;
+               serial1 = &ux500_serial1;
+               serial2 = &ux500_serial2;
+       };
+
        memory@0 {
                device_type = "memory";
                reg = <0x20000000 0x1f000000>, <0xc0000000 0x3f000000>;
index 651c56d400a416e4056208b0ddd2b9963aaa2daa..c8b815819cfe3cb6b3268cf700aedfbbfbd4a908 100644 (file)
        model = "ST-Ericsson CCU9540 platform with Device Tree";
        compatible = "st-ericsson,ccu9540", "st-ericsson,u9540";
 
+       /* This stabilizes the serial port enumeration */
+       aliases {
+               serial0 = &ux500_serial0;
+               serial1 = &ux500_serial1;
+               serial2 = &ux500_serial2;
+       };
+
        memory {
                reg = <0x00000000 0x20000000>;
        };
index 853684ad777337771b48be2982a7c525f95ef1d6..a75f3289e653ab2973e2d7dd1cb12c8a12724451 100644 (file)
                        power-domains = <&pm_domains DOMAIN_VAPE>;
                };
 
-               uart@80120000 {
+               ux500_serial0: uart@80120000 {
                        compatible = "arm,pl011", "arm,primecell";
                        reg = <0x80120000 0x1000>;
                        interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
                        status = "disabled";
                };
 
-               uart@80121000 {
+               ux500_serial1: uart@80121000 {
                        compatible = "arm,pl011", "arm,primecell";
                        reg = <0x80121000 0x1000>;
                        interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>;
                        status = "disabled";
                };
 
-               uart@80007000 {
+               ux500_serial2: uart@80007000 {
                        compatible = "arm,pl011", "arm,primecell";
                        reg = <0x80007000 0x1000>;
                        interrupts = <0 26 IRQ_TYPE_LEVEL_HIGH>;
index 744c1e3a744df1530ba53cd2a56029a9e9ac5d37..6d8ce154347e7a9a78d02e0c06701a02fc02a4ad 100644 (file)
                        status = "okay";
                };
 
+               /* This UART is unused and thus left disabled */
                uart@80121000 {
                        pinctrl-names = "default", "sleep";
                        pinctrl-0 = <&uart1_default_mode>;
                        pinctrl-1 = <&uart1_sleep_mode>;
-                       status = "okay";
                };
 
                uart@80007000 {
index 2b1cb5b584b664033bd5eaba7c7f527ad35c5cf5..18e9795a94f974ccc2522bb7e8c6a0601dde5465 100644 (file)
        model = "ST-Ericsson HREF (pre-v60) and ST UIB";
        compatible = "st-ericsson,mop500", "st-ericsson,u8500";
 
+       /* This stabilizes the serial port enumeration */
+       aliases {
+               serial0 = &ux500_serial0;
+               serial1 = &ux500_serial1;
+               serial2 = &ux500_serial2;
+       };
+
        soc {
                /* Reset line for the BU21013 touchscreen */
                i2c@80110000 {
index 59523f86681237936420cb3212f69c5b6227a65a..24739914e689edb04ccafb33caeaced14cb6fe63 100644 (file)
 / {
        model = "ST-Ericsson HREF (pre-v60) and TVK1281618 UIB";
        compatible = "st-ericsson,mop500", "st-ericsson,u8500";
+
+       /* This stabilizes the serial port enumeration */
+       aliases {
+               serial0 = &ux500_serial0;
+               serial1 = &ux500_serial1;
+               serial2 = &ux500_serial2;
+       };
 };
index 7f3975b58d16607d5cf79f10eca8df1dc75ab9d5..b0278f4c486cefa6f3363f91fd3f312bfc87015f 100644 (file)
        };
 
        soc {
+               /* Enable UART1 on this board */
+               uart@80121000 {
+                       status = "okay";
+               };
+
                i2c@80004000 {
                        tps61052@33 {
                                compatible = "tps61052";
index 8c6a2de56cf121305a5f4bde0cf19255a60ff1fe..c2e1ba019a2f6e3d2411a39464e1c9212a2f2ac5 100644 (file)
        model = "ST-Ericsson HREF (v60+) and ST UIB";
        compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500";
 
+       /* This stabilizes the serial port enumeration */
+       aliases {
+               serial0 = &ux500_serial0;
+               serial1 = &ux500_serial1;
+               serial2 = &ux500_serial2;
+       };
+
        soc {
                /* Reset line for the BU21013 touchscreen */
                i2c@80110000 {
index d53cccdce776b8d9253f9943ee1ad8f427a4a2d3..ebd8547e98f11c181d3d21353660a576355a13fd 100644 (file)
 / {
        model = "ST-Ericsson HREF (v60+) and TVK1281618 UIB";
        compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500";
+
+       /* This stabilizes the serial port enumeration */
+       aliases {
+               serial0 = &ux500_serial0;
+               serial1 = &ux500_serial1;
+               serial2 = &ux500_serial2;
+       };
 };
index a4bc9e77d640d3762b01952ca23ae07d558cab36..810cda743b6d56ae19118260367f986dcee690af 100644 (file)
                                  <&vaudio_hf_hrefv60_mode>,
                                  <&gbf_hrefv60_mode>,
                                  <&hdtv_hrefv60_mode>,
-                                 <&touch_hrefv60_mode>;
+                                 <&touch_hrefv60_mode>,
+                                 <&gpios_hrefv60_mode>;
 
                        sdi0 {
-                               /* SD card detect GPIO pin, extend default state */
                                sdi0_default_mode: sdi0_default {
+                                       /* SD card detect GPIO pin, extend default state */
                                        default_hrefv60_cfg1 {
                                                pins = "GPIO95_E8";
                                                ste,config = <&gpio_in_pu>;
                                        };
+                                       /* VMMCI level-shifter enable */
+                                       default_hrefv60_cfg2 {
+                                               pins = "GPIO169_D22";
+                                               ste,config = <&gpio_out_lo>;
+                                       };
+                                       /* VMMCI level-shifter voltage select */
+                                       default_hrefv60_cfg3 {
+                                               pins = "GPIO5_AG6";
+                                               ste,config = <&gpio_out_hi>;
+                                       };
                                };
                        };
                        ipgpio {
                                        };
                                };
                        };
+                       gpios {
+                               /* Dangling GPIO pins */
+                               gpios_hrefv60_mode: gpios_hrefv60 {
+                                       default_cfg1 {
+                                               /* Normally UART1 RXD, now dangling */
+                                               pins = "GPIO4_AH6";
+                                               ste,config = <&in_pu>;
+                                       };
+                               };
+                       };
                };
        };
 };
index 9edadc37719ffa491aa3ad95c15d27ded3585727..32a5ccb14e7ebfbaec118ceed1f3bc14c2b646df 100644 (file)
        model = "Calao Systems Snowball platform with device tree";
        compatible = "calaosystems,snowball-a9500", "st-ericsson,u9500";
 
+       /* This stabilizes the serial port enumeration */
+       aliases {
+               serial0 = &ux500_serial0;
+               serial1 = &ux500_serial1;
+               serial2 = &ux500_serial2;
+       };
+
        memory {
                reg = <0x00000000 0x20000000>;
        };
                        status = "okay";
                };
 
+               /* This UART is unused and thus left disabled */
                uart@80121000 {
                        pinctrl-names = "default", "sleep";
                        pinctrl-0 = <&uart1_default_mode>;
                        pinctrl-1 = <&uart1_sleep_mode>;
-                       status = "okay";
                };
 
                uart@80007000 {
                                                pins = "GPIO21_AB3"; /* DAT31DIR */
                                                ste,config = <&out_hi>;
                                        };
-
+                                       /* SD card detect GPIO pin, extend default state */
+                                       snowball_cfg2 {
+                                               pins = "GPIO218_AH11";
+                                               ste,config = <&gpio_in_pu>;
+                                       };
+                                       /* VMMCI level-shifter enable */
+                                       snowball_cfg3 {
+                                               pins = "GPIO217_AH12";
+                                               ste,config = <&gpio_out_lo>;
+                                       };
+                                       /* VMMCI level-shifter voltage select */
+                                       snowball_cfg4 {
+                                               pins = "GPIO228_AJ6";
+                                               ste,config = <&gpio_out_hi>;
+                                       };
                                };
                        };
                        ssp0 {
index 83c50193626ce03c1a5d590a8d8ff81fa861f180..30b3bc1666d2185b47c0550b920d27a92f43804e 100644 (file)
@@ -13,6 +13,7 @@ generic-y += kdebug.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += msgbuf.h
 generic-y += param.h
 generic-y += parport.h
index 6f225acc07c56bdef12a3ca68ae526d8fe836bf4..b7f6fb462ea0da21e59e67dbb4e75de729ffdfcc 100644 (file)
@@ -286,7 +286,7 @@ extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
  */
 static inline phys_addr_t __virt_to_idmap(unsigned long x)
 {
-       if (arch_virt_to_idmap)
+       if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap)
                return arch_virt_to_idmap(x);
        else
                return __virt_to_phys(x);
diff --git a/arch/arm/include/asm/mm-arch-hooks.h b/arch/arm/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index 7056660..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_ARM_MM_ARCH_HOOKS_H
-#define _ASM_ARM_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_ARM_MM_ARCH_HOOKS_H */
index 357f57ea83f4b82fe0712c7af9a06d677be34d18..54272e0be7137c853f2d684d236449157ef55193 100644 (file)
@@ -818,12 +818,13 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
                        if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
                                break;
 
-               of_node_put(dn);
                if (cpu >= nr_cpu_ids) {
                        pr_warn("Failed to find logical CPU for %s\n",
                                dn->name);
+                       of_node_put(dn);
                        break;
                }
+               of_node_put(dn);
 
                irqs[i] = cpu;
                cpumask_set_cpu(cpu, &pmu->supported_cpus);
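
The reordering matters because the warning dereferences dn->name: of_node_put() may release the last reference and free the node, so the put has to follow every use of dn, on both the error and success paths. The general shape, assuming kernel context (the helper name is illustrative; the phandle property matches the surrounding function):

static int affine_cpu_sketch(struct device_node *parent, int i)
{
        struct device_node *dn;
        int cpu = -EINVAL;

        dn = of_parse_phandle(parent, "interrupt-affinity", i); /* +1 ref */
        if (!dn)
                return -EINVAL;

        /* ... resolve the logical CPU; dn->name may appear in warnings ... */

        of_node_put(dn);        /* drop the reference after the last use */
        return cpu;
}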
index 1a4d232796be67787a305fabb35a1955fe909f94..38269358fd252c6bb93fd58a0478319c436cdfd3 100644 (file)
@@ -50,7 +50,7 @@ static void __soft_restart(void *addr)
        flush_cache_all();
 
        /* Switch to the identity mapping. */
-       phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+       phys_reset = (phys_reset_t)(unsigned long)virt_to_idmap(cpu_reset);
        phys_reset((unsigned long)addr);
 
        /* Should never get here. */
index 80bad29d609ac2bf2be07f4de20d3650d8ea28d9..8c4467fad8370c73374ff104209b779e580852e1 100644 (file)
@@ -291,8 +291,6 @@ void __init imx_gpc_check_dt(void)
        }
 }
 
-#ifdef CONFIG_PM_GENERIC_DOMAINS
-
 static void _imx6q_pm_pu_power_off(struct generic_pm_domain *genpd)
 {
        int iso, iso2sw;
@@ -399,7 +397,6 @@ static struct genpd_onecell_data imx_gpc_onecell_data = {
 static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
 {
        struct clk *clk;
-       bool is_off;
        int i;
 
        imx6q_pu_domain.reg = pu_reg;
@@ -416,18 +413,13 @@ static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
        }
        imx6q_pu_domain.num_clks = i;
 
-       is_off = IS_ENABLED(CONFIG_PM);
-       if (is_off) {
-               _imx6q_pm_pu_power_off(&imx6q_pu_domain.base);
-       } else {
-               /*
-                * Enable power if compiled without CONFIG_PM in case the
-                * bootloader disabled it.
-                */
-               imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
-       }
+       /* Enable power always in case bootloader disabled it. */
+       imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
+
+       if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
+               return 0;
 
-       pm_genpd_init(&imx6q_pu_domain.base, NULL, is_off);
+       pm_genpd_init(&imx6q_pu_domain.base, NULL, false);
        return of_genpd_add_provider_onecell(dev->of_node,
                                             &imx_gpc_onecell_data);
 
@@ -437,13 +429,6 @@ clk_err:
        return -EINVAL;
 }
 
-#else
-static inline int imx_gpc_genpd_init(struct device *dev, struct regulator *reg)
-{
-       return 0;
-}
-#endif /* CONFIG_PM_GENERIC_DOMAINS */
-
 static int imx_gpc_probe(struct platform_device *pdev)
 {
        struct regulator *pu_reg;
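
The restructuring trades an #ifdef CONFIG_PM_GENERIC_DOMAINS region plus an empty stub for one runtime-constant test: IS_ENABLED() folds to 0 or 1 at compile time, so the genpd path is always parsed and type-checked yet still discarded by the optimizer when the option is off, and the PU domain is now powered on unconditionally in case the bootloader left it off. The idiom in isolation, assuming kernel context:

static int genpd_init_sketch(void)
{
        /* IS_ENABLED() is a compile-time constant, not a runtime probe */
        if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
                return 0;       /* nothing to register in this config */

        /* ... pm_genpd_init() and of_genpd_add_provider_onecell() ... */
        return 0;
}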
index ecc04ff13e9595213aa57178bb7b0c40183c77c9..4a023e8d1bdb001c01cf361efada2f3ee6944d68 100644 (file)
@@ -60,6 +60,7 @@ config SOC_AM43XX
        select ARM_GIC
        select MACH_OMAP_GENERIC
        select MIGHT_HAVE_CACHE_L2X0
+       select HAVE_ARM_SCU
 
 config SOC_DRA7XX
        bool "TI DRA7XX"
index c092730749b9d13228bad861535b2fa98804ac84..bf366b39fa611d8b03028c3f7129c2b3ef7f9c5e 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/ata_platform.h>
 #include <linux/serial_8250.h>
 #include <linux/gpio.h>
+#include <linux/regulator/machine.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -144,6 +145,8 @@ static void __init capc7117_init(void)
 
        capc7117_uarts_init();
        capc7117_ide_init();
+
+       regulator_has_full_constraints();
 }
 
 MACHINE_START(CAPC7117,
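
This is the first of several board files below gaining the same one-line call. Once a machine init has registered every regulator the board will ever expose, regulator_has_full_constraints() tells the regulator core the picture is complete, so it can disable unused regulators at late init and hand dummy supplies to consumers whose supply genuinely does not exist. The recurring shape, assuming kernel context (the board name is illustrative):

#include <linux/regulator/machine.h>

static void __init example_board_init(void)
{
        /* ... register platform devices and any fixed regulators ... */

        /* All supplies are now declared; the core may act on that */
        regulator_has_full_constraints();
}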
index bb99f59a36d880f8879824af44b67c924078b283..a17a91eb8e9a384aae47458ae9e84665aa8e60df 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/syscore_ops.h>
 #include <linux/irq.h>
 #include <linux/gpio.h>
+#include <linux/regulator/machine.h>
 
 #include <linux/dm9000.h>
 #include <linux/leds.h>
@@ -466,6 +467,8 @@ static void __init cmx2xx_init(void)
        cmx2xx_init_ac97();
        cmx2xx_init_touchscreen();
        cmx2xx_init_leds();
+
+       regulator_has_full_constraints();
 }
 
 static void __init cmx2xx_init_irq(void)
index 4d3588d26c2a18854b6fc81430dafe371a6af747..5851f4c254c1618cc4a8e433491b3e08e8e5448b 100644 (file)
@@ -835,6 +835,8 @@ static void __init cm_x300_init(void)
        cm_x300_init_ac97();
        cm_x300_init_wi2wi();
        cm_x300_init_bl();
+
+       regulator_has_full_constraints();
 }
 
 static void __init cm_x300_fixup(struct tag *tags, char **cmdline)
index 5f9d9303b346d6c381132a9f8b5664200ed6c6ed..3503826333c74809c9276f022722ce4b4f0a888e 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
 #include <linux/platform_device.h>
+#include <linux/regulator/machine.h>
 #include <linux/ucb1400.h>
 
 #include <asm/mach/arch.h>
@@ -294,6 +295,8 @@ static void __init colibri_pxa270_init(void)
                printk(KERN_ERR "Illegal colibri_pxa270_baseboard type %d\n",
                                colibri_pxa270_baseboard);
        }
+
+       regulator_has_full_constraints();
 }
 
 /* The "Income s.r.o. SH-Dmaster PXA270 SBC" board can be booted either
index 51531ecffca85783fa7cbd1ab608f9f52daa3846..9d7072b040458ff741ed8d30ccddaf9593fb1bd3 100644 (file)
@@ -1306,6 +1306,8 @@ static void __init em_x270_init(void)
        em_x270_init_i2c();
        em_x270_init_camera();
        em_x270_userspace_consumers_init();
+
+       regulator_has_full_constraints();
 }
 
 MACHINE_START(EM_X270, "Compulab EM-X270")
index c98511c5abd10003818c0950841c3f7a4d0a3fd4..9b0eb0252af6facf54158a70d97712a92373c1af 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/pxa2xx_spi.h>
 #include <linux/can/platform/mcp251x.h>
+#include <linux/regulator/machine.h>
 
 #include "generic.h"
 
@@ -185,6 +186,8 @@ static void __init icontrol_init(void)
        mxm_8x10_mmc_init();
 
        icontrol_can_init();
+
+       regulator_has_full_constraints();
 }
 
 MACHINE_START(ICONTROL, "iControl/SafeTcam boards using Embedian MXM-8x10 CoM")
index 872dcb20e75784710000bb7d409407be269e2bd3..066e3a250ee039bb4e9b72324a06eb4cafb12fbb 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/dm9000.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/partitions.h>
+#include <linux/regulator/machine.h>
 #include <linux/i2c/pxa-i2c.h>
 
 #include <asm/types.h>
@@ -534,6 +535,8 @@ static void __init trizeps4_init(void)
 
        BCR_writew(trizeps_conxs_bcr);
        board_backlight_power(1);
+
+       regulator_has_full_constraints();
 }
 
 static void __init trizeps4_map_io(void)
index aa89488f961ecfe9a1ae2cd27083b0284102987c..54122a983ae37664efaf21c6ee4e962412c024cb 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/dm9000.h>
 #include <linux/ucb1400.h>
 #include <linux/ata_platform.h>
+#include <linux/regulator/machine.h>
 #include <linux/regulator/max1586.h>
 #include <linux/i2c/pxa-i2c.h>
 
@@ -711,6 +712,8 @@ static void __init vpac270_init(void)
        vpac270_ts_init();
        vpac270_rtc_init();
        vpac270_ide_init();
+
+       regulator_has_full_constraints();
 }
 
 MACHINE_START(VPAC270, "Voipac PXA270")
index ac2ae5c71ab45b7428440ee8925b121533ba9509..6158566fa0f7421bab2bd1b832fee5ca18b16a3a 100644 (file)
@@ -868,6 +868,8 @@ static void __init zeus_init(void)
        i2c_register_board_info(0, ARRAY_AND_SIZE(zeus_i2c_devices));
        pxa2xx_set_spi_info(3, &pxa2xx_spi_ssp3_master_info);
        spi_register_board_info(zeus_spi_board_info, ARRAY_SIZE(zeus_spi_board_info));
+
+       regulator_has_full_constraints();
 }
 
 static struct map_desc zeus_io_desc[] __initdata = {
index a99d90a4d09c522d0d01c5fd54d5dd2b3c1cdfda..06640914d9a08c79da2d04eb1d42d10577ec9798 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2009-2012 ST Microelectronics
  * Rajeev Kumar <rajeev-dlh.kumar@st.com>
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 92da0a8c6bce6764cb7aebc1cdd07da7d8360ee0..7058720c5278a6d3809a8635cfffe3710219b102 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2009-2012 ST Microelectronics
  * Rajeev Kumar <rajeev-dlh.kumar@st.com>
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 935639ce59ba0e5e21da5d2b01b71751c81a254f..cfaf7c665b5881aedd5d83d09e22df41a0406545 100644 (file)
@@ -4,7 +4,7 @@
  * Miscellaneous registers definitions for SPEAr3xx machine family
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index f2d6a01765754b2bce876b41748eb8fe14f6f1bf..5ed841ccf8a38c4dc6db294d0eb90323b88b6e8f 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2009,2012 ST Microelectronics
  * Rajeev Kumar<rajeev-dlh.kumar@st.com>
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 51b2dc93e4dadfd22a5d6a225bcf70b846febc4b..8439b9c12edb616834a6eeb74274e22e7d02ed04 100644 (file)
@@ -4,7 +4,7 @@
  * Serial port stubs for kernel decompress status messages
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index cfa1199d0f4a86152fddfae9f83d3074c330e3a8..b4529f3e0ee976b784e59fdd5ff2d45f64283cbf 100644 (file)
@@ -4,7 +4,7 @@
  * DMAC pl080 definitions for SPEAr platform
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index eb6590ded40dfdaec09b08a7eaf415b4211733ee..608dec6725aea4189dc2486926d96fff3869c0b8 100644 (file)
@@ -4,7 +4,7 @@
  * DMAC pl080 definitions for SPEAr platform
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index ce5e098c48889347227d876fd181023babc1e31b..b4342155a7833eb350cf668a1ce645f20a7b091e 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr platform specific restart functions
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index d9ce4d8000f092eb005b2624f87b467a69c996b4..cd5d375d91f0054a6c55926ae8582503a251e908 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr1310 machine source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 3f3c0f124bd384fb7f00860e404f8df77c6c6392..94594d5a446c0f23e1f995487fc2a99d06f24f6a 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr1340 machine source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 2e463a93468d2da133f4f311cd935946eb79e585..b7afce6795f436e9259e84d34f01fa246173eedf 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr13XX machines common source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index b52e48f342f444f319ad51429c6ce38ab9e6db26..5b32edda22769b7f3b76fe5bc16143b3917666aa 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr300 machine source file
  *
  * Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index ed2029db391f6071cb69b8bb0d5099193a9b805a..86a44ac7ff67b6b404d69a11172a3a8b21f8be24 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr310 machine source file
  *
  * Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index bf634b32a930c2d8b6eb54749a4e3a0c39f5e995..d45d751926c50e344cf168002eecb185eb82a511 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr320 machine source file
  *
  * Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index bf3b1fd8cb2398b62e6990c3f116749c76f375ec..23394ac76cf22886e40cb5ec1e098586a43a774d 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr3XX machines common source file
  *
  * Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 1ced8a0f7a52624cae84203eb0d3417aed5171db..cba12f34ff774ca08216c5d2b6061b07b68b1a99 100644 (file)
@@ -1971,7 +1971,7 @@ static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
 {
        int next_bitmap;
 
-       if (mapping->nr_bitmaps > mapping->extensions)
+       if (mapping->nr_bitmaps >= mapping->extensions)
                return -EINVAL;
 
        next_bitmap = mapping->nr_bitmaps;
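
The one-character change above fixes an off-by-one: mapping->nr_bitmaps counts bitmaps already allocated and mapping->extensions is the allowed maximum, so the guard must reject the call when the two are equal. In isolation:

    /*
     * With '>' the old code still extended when nr_bitmaps == extensions,
     * allocating one bitmap more than the mapping was sized for; '>='
     * bails out exactly at the limit.
     */
    static int can_extend(unsigned int nr_bitmaps, unsigned int extensions)
    {
            return nr_bitmaps < extensions;
    }
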
index 0716bbe198728876e81361897b69258e3489bf24..de2b246fed3808fce444b560a725ed4007464174 100644 (file)
@@ -274,7 +274,10 @@ __v7_ca15mp_setup:
 __v7_b15mp_setup:
 __v7_ca17mp_setup:
        mov     r10, #0
-1:
+1:     adr     r12, __v7_setup_stack           @ the local stack
+       stmia   r12, {r0-r5, lr}                @ v7_invalidate_l1 touches r0-r6
+       bl      v7_invalidate_l1
+       ldmia   r12, {r0-r5, lr}
 #ifdef CONFIG_SMP
        ALT_SMP(mrc     p15, 0, r0, c1, c0, 1)
        ALT_UP(mov      r0, #(1 << 6))          @ fake it for UP
@@ -283,7 +286,7 @@ __v7_ca17mp_setup:
        orreq   r0, r0, r10                     @ Enable CPU-specific SMP bits
        mcreq   p15, 0, r0, c1, c0, 1
 #endif
-       b       __v7_setup
+       b       __v7_setup_cont
 
 /*
  * Errata:
@@ -413,10 +416,11 @@ __v7_pj4b_setup:
 
 __v7_setup:
        adr     r12, __v7_setup_stack           @ the local stack
-       stmia   r12, {r0-r5, r7, r9, r11, lr}
+       stmia   r12, {r0-r5, lr}                @ v7_invalidate_l1 touches r0-r6
        bl      v7_invalidate_l1
-       ldmia   r12, {r0-r5, r7, r9, r11, lr}
+       ldmia   r12, {r0-r5, lr}
 
+__v7_setup_cont:
        and     r0, r9, #0xff000000             @ ARM?
        teq     r0, #0x41000000
        bne     __errata_finish
@@ -480,7 +484,7 @@ ENDPROC(__v7_setup)
 
        .align  2
 __v7_setup_stack:
-       .space  4 * 11                          @ 11 registers
+       .space  4 * 7                           @ 7 registers
 
        __INITDATA
 
index 4550d247e308be128b439b0735d853f4745f3618..876060bcceeb3ea989e24fe18b42910f3cce4058 100644 (file)
@@ -74,32 +74,52 @@ struct jit_ctx {
 
 int bpf_jit_enable __read_mostly;
 
-static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
+static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
+                     unsigned int size)
+{
+       void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
+
+       if (!ptr)
+               return -EFAULT;
+       memcpy(ret, ptr, size);
+       return 0;
+}
+
+static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
 {
        u8 ret;
        int err;
 
-       err = skb_copy_bits(skb, offset, &ret, 1);
+       if (offset < 0)
+               err = call_neg_helper(skb, offset, &ret, 1);
+       else
+               err = skb_copy_bits(skb, offset, &ret, 1);
 
        return (u64)err << 32 | ret;
 }
 
-static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
+static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
 {
        u16 ret;
        int err;
 
-       err = skb_copy_bits(skb, offset, &ret, 2);
+       if (offset < 0)
+               err = call_neg_helper(skb, offset, &ret, 2);
+       else
+               err = skb_copy_bits(skb, offset, &ret, 2);
 
        return (u64)err << 32 | ntohs(ret);
 }
 
-static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
+static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
 {
        u32 ret;
        int err;
 
-       err = skb_copy_bits(skb, offset, &ret, 4);
+       if (offset < 0)
+               err = call_neg_helper(skb, offset, &ret, 4);
+       else
+               err = skb_copy_bits(skb, offset, &ret, 4);
 
        return (u64)err << 32 | ntohl(ret);
 }
@@ -536,9 +556,6 @@ static int build_body(struct jit_ctx *ctx)
                case BPF_LD | BPF_B | BPF_ABS:
                        load_order = 0;
 load:
-                       /* the interpreter will deal with the negative K */
-                       if ((int)k < 0)
-                               return -ENOTSUPP;
                        emit_mov_i(r_off, k, ctx);
 load_common:
                        ctx->seen |= SEEN_DATA | SEEN_CALL;
@@ -547,12 +564,24 @@ load_common:
                                emit(ARM_SUB_I(r_scratch, r_skb_hl,
                                               1 << load_order), ctx);
                                emit(ARM_CMP_R(r_scratch, r_off), ctx);
-                               condt = ARM_COND_HS;
+                               condt = ARM_COND_GE;
                        } else {
                                emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
                                condt = ARM_COND_HI;
                        }
 
+                       /*
+                        * Test for a negative offset, but only if we
+                        * are currently scheduled to take the fast
+                        * path. This updates the flags so that the
+                        * slow-path instructions are ignored if the
+                        * offset is negative.
+                        *
+                        * For load_order == 0 the HI condition will
+                        * make loads at offset 0 take the slow path too.
+                        */
+                       _emit(condt, ARM_CMP_I(r_off, 0), ctx);
+
                        _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
                              ctx);
 
@@ -828,7 +857,9 @@ b_epilogue:
                        emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
                        break;
                case BPF_ANC | SKF_AD_IFINDEX:
+               case BPF_ANC | SKF_AD_HATYPE:
                        /* A = skb->dev->ifindex */
+                       /* A = skb->dev->type */
                        ctx->seen |= SEEN_SKB;
                        off = offsetof(struct sk_buff, dev);
                        emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
@@ -838,8 +869,24 @@ b_epilogue:
 
                        BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
                                                  ifindex) != 4);
-                       off = offsetof(struct net_device, ifindex);
-                       emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
+                       BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
+                                                 type) != 2);
+
+                       if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
+                               off = offsetof(struct net_device, ifindex);
+                               emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
+                       } else {
+                               /*
+                                * The offset of field "type" in "struct
+                                * net_device" exceeds the immediate
+                                * range of the ldrh rd, [rn, #imm]
+                                * instruction, so load the offset into
+                                * a register and use ldrh rd, [rn, rm].
+                                */
+                               off = offsetof(struct net_device, type);
+                               emit_mov_i(ARM_R3, off, ctx);
+                               emit(ARM_LDRH_R(r_A, r_scratch, ARM_R3), ctx);
+                       }
                        break;
                case BPF_ANC | SKF_AD_MARK:
                        ctx->seen |= SEEN_SKB;
@@ -860,9 +907,22 @@ b_epilogue:
                        off = offsetof(struct sk_buff, vlan_tci);
                        emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
                        if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
-                               OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
-                       else
-                               OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
+                               OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
+                       else {
+                               OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
+                               OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
+                       }
+                       break;
+               case BPF_ANC | SKF_AD_PKTTYPE:
+                       ctx->seen |= SEEN_SKB;
+                       BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+                                                 __pkt_type_offset[0]) != 1);
+                       off = PKT_TYPE_OFFSET();
+                       emit(ARM_LDRB_I(r_A, r_skb, off), ctx);
+                       emit(ARM_AND_I(r_A, r_A, PKT_TYPE_MAX), ctx);
+#ifdef __BIG_ENDIAN_BITFIELD
+                       emit(ARM_LSR_I(r_A, r_A, 5), ctx);
+#endif
                        break;
                case BPF_ANC | SKF_AD_QUEUE:
                        ctx->seen |= SEEN_SKB;
@@ -873,6 +933,14 @@ b_epilogue:
                        off = offsetof(struct sk_buff, queue_mapping);
                        emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
                        break;
+               case BPF_ANC | SKF_AD_PAY_OFFSET:
+                       ctx->seen |= SEEN_SKB | SEEN_CALL;
+
+                       emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
+                       emit_mov_i(ARM_R3, (unsigned int)skb_get_poff, ctx);
+                       emit_blx_r(ARM_R3, ctx);
+                       emit(ARM_MOV_R(r_A, ARM_R0), ctx);
+                       break;
                case BPF_LDX | BPF_W | BPF_ABS:
                        /*
                         * load a 32bit word from struct seccomp_data.
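
The bpf_jit_32.c changes above stop punting negative skb offsets back to the interpreter (-ENOTSUPP) and instead route them through bpf_internal_load_pointer_neg_helper(), alongside new JIT support for the PKTTYPE, HATYPE and PAY_OFFSET ancillary loads. The dispatch now shared by the jit_get_skb_{b,h,w} helpers boils down to the following; a hedged sketch of the single-byte case, not the JIT code itself:

    #include <linux/errno.h>
    #include <linux/filter.h>
    #include <linux/skbuff.h>

    static int example_load_skb_byte(struct sk_buff *skb, int offset, u8 *out)
    {
            if (offset < 0) {
                    /* negative offsets address the special SKF_* sections */
                    void *ptr = bpf_internal_load_pointer_neg_helper(skb,
                                                                     offset, 1);

                    if (!ptr)
                            return -EFAULT;
                    *out = *(u8 *)ptr;
                    return 0;
            }
            return skb_copy_bits(skb, offset, out, 1);
    }
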
index b2d7d92859d37e11c31f54c751a1cb78c419713b..4b17d5ab652a4029e7cc73165075a75068e0ef6c 100644 (file)
@@ -74,6 +74,7 @@
 #define ARM_INST_LDRB_I                0x05d00000
 #define ARM_INST_LDRB_R                0x07d00000
 #define ARM_INST_LDRH_I                0x01d000b0
+#define ARM_INST_LDRH_R                0x019000b0
 #define ARM_INST_LDR_I         0x05900000
 
 #define ARM_INST_LDM           0x08900000
                                 | (rm))
 #define ARM_LDRH_I(rt, rn, off)        (ARM_INST_LDRH_I | (rt) << 12 | (rn) << 16 \
                                 | (((off) & 0xf0) << 4) | ((off) & 0xf))
+#define ARM_LDRH_R(rt, rn, rm) (ARM_INST_LDRH_R | (rt) << 12 | (rn) << 16 \
+                                | (rm))
 
 #define ARM_LDM(rn, regs)      (ARM_INST_LDM | (rn) << 16 | (regs))
 
index b112a39834d046b65ea8af2df2555051da1223c3..70fd9ffb58cfc08e82f3a978e6630c70eaeb1840 100644 (file)
@@ -25,6 +25,7 @@ generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += mman.h
 generic-y += msgbuf.h
 generic-y += msi.h
diff --git a/arch/arm64/include/asm/mm-arch-hooks.h b/arch/arm64/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index 562b655..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_ARM64_MM_ARCH_HOOKS_H
-#define _ASM_ARM64_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_ARM64_MM_ARCH_HOOKS_H */
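
This deletion (and the matching ones for avr32, blackfin, c6x, cris, frv, hexagon, ia64, m32r, m68k, metag, microblaze, mips, mn10300 and nios2 below) relies on the generic-y additions to each asm/Kbuild: for every header listed there, Kbuild generates a one-line wrapper under arch/$ARCH/include/generated/asm/, so fifteen identical stub files collapse into:

    /* generated: arch/$ARCH/include/generated/asm/mm-arch-hooks.h */
    #include <asm-generic/mm-arch-hooks.h>
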
index 9d4aa18f2a825256537a7d7e33c606d89d003ee7..e8ca6eaedd0252e2056530d71d519f423931d323 100644 (file)
@@ -122,12 +122,12 @@ static int __init uefi_init(void)
 
        /* Show what we know for posterity */
        c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
-                            sizeof(vendor));
+                            sizeof(vendor) * sizeof(efi_char16_t));
        if (c16) {
                for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
                        vendor[i] = c16[i];
                vendor[i] = '\0';
-               early_memunmap(c16, sizeof(vendor));
+               early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
        }
 
        pr_info("EFI v%u.%.02u by %s\n",
index f860bfda454afb9de7007684ba60eaf7f375e130..e16351819fed9ada8575f117c4777232da0d72b9 100644 (file)
@@ -585,7 +585,8 @@ ENDPROC(el0_irq)
  *
  */
 ENTRY(cpu_switch_to)
-       add     x8, x0, #THREAD_CPU_CONTEXT
+       mov     x10, #THREAD_CPU_CONTEXT
+       add     x8, x0, x10
        mov     x9, sp
        stp     x19, x20, [x8], #16             // store callee-saved registers
        stp     x21, x22, [x8], #16
@@ -594,7 +595,7 @@ ENTRY(cpu_switch_to)
        stp     x27, x28, [x8], #16
        stp     x29, x9, [x8], #16
        str     lr, [x8]
-       add     x8, x1, #THREAD_CPU_CONTEXT
+       add     x8, x1, x10
        ldp     x19, x20, [x8], #16             // restore callee-saved registers
        ldp     x21, x22, [x8], #16
        ldp     x23, x24, [x8], #16
index 240b75c0e94fdc435a672b740b08d4a848915295..463fa2e7e34c10ba56f2cd4a10e208062feff3d9 100644 (file)
@@ -61,7 +61,7 @@ void __init init_IRQ(void)
 static bool migrate_one_irq(struct irq_desc *desc)
 {
        struct irq_data *d = irq_desc_get_irq_data(desc);
-       const struct cpumask *affinity = d->affinity;
+       const struct cpumask *affinity = irq_data_get_affinity_mask(d);
        struct irq_chip *c;
        bool ret = false;
 
@@ -81,7 +81,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
        if (!c->irq_set_affinity)
                pr_debug("IRQ%u: unable to set affinity\n", d->irq);
        else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
-               cpumask_copy(d->affinity, affinity);
+               cpumask_copy(irq_data_get_affinity_mask(d), affinity);
 
        return ret;
 }
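
Both changes above replace direct pokes at d->affinity with the irq_data_get_affinity_mask() accessor, which keeps callers correct regardless of where the irq core stores the mask. A hedged usage sketch:

    #include <linux/irq.h>

    static void example_copy_affinity(struct irq_data *d,
                                      const struct cpumask *mask)
    {
            cpumask_copy(irq_data_get_affinity_mask(d), mask);
    }
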
index 1d66afdfac079bcf78a8dc230ed326a9d2fbfa12..f61f2dd67464746c728474a7b5503dd7dfbdcb67 100644 (file)
@@ -12,6 +12,7 @@ generic-y += irq_work.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += param.h
 generic-y += percpu.h
 generic-y += preempt.h
diff --git a/arch/avr32/include/asm/mm-arch-hooks.h b/arch/avr32/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index 145452f..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_AVR32_MM_ARCH_HOOKS_H
-#define _ASM_AVR32_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_AVR32_MM_ARCH_HOOKS_H */
index d0f771be9e96eda02c1045bbb5702cc9b9f74b8d..a124c55733dbf7dc19e61aacbf74671ff149de83 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <mach/pm.h>
 
+static bool disable_cpu_idle_poll;
 
 static cycle_t read_cycle_count(struct clocksource *cs)
 {
@@ -80,45 +81,45 @@ static int comparator_next_event(unsigned long delta,
        return 0;
 }
 
-static void comparator_mode(enum clock_event_mode mode,
-               struct clock_event_device *evdev)
+static int comparator_shutdown(struct clock_event_device *evdev)
 {
-       switch (mode) {
-       case CLOCK_EVT_MODE_ONESHOT:
-               pr_debug("%s: start\n", evdev->name);
-               /* FALLTHROUGH */
-       case CLOCK_EVT_MODE_RESUME:
+       pr_debug("%s: %s\n", __func__, evdev->name);
+       sysreg_write(COMPARE, 0);
+
+       if (disable_cpu_idle_poll) {
+               disable_cpu_idle_poll = false;
                /*
-                * If we're using the COUNT and COMPARE registers we
-                * need to force idle poll.
+                * Only disable idle poll if we have forced that
+                * in a previous call.
                 */
-               cpu_idle_poll_ctrl(true);
-               break;
-       case CLOCK_EVT_MODE_UNUSED:
-       case CLOCK_EVT_MODE_SHUTDOWN:
-               sysreg_write(COMPARE, 0);
-               pr_debug("%s: stop\n", evdev->name);
-               if (evdev->mode == CLOCK_EVT_MODE_ONESHOT ||
-                   evdev->mode == CLOCK_EVT_MODE_RESUME) {
-                       /*
-                        * Only disable idle poll if we have forced that
-                        * in a previous call.
-                        */
-                       cpu_idle_poll_ctrl(false);
-               }
-               break;
-       default:
-               BUG();
+               cpu_idle_poll_ctrl(false);
        }
+       return 0;
+}
+
+static int comparator_set_oneshot(struct clock_event_device *evdev)
+{
+       pr_debug("%s: %s\n", __func__, evdev->name);
+
+       disable_cpu_idle_poll = true;
+       /*
+        * If we're using the COUNT and COMPARE registers we
+        * need to force idle poll.
+        */
+       cpu_idle_poll_ctrl(true);
+
+       return 0;
 }
 
 static struct clock_event_device comparator = {
-       .name           = "avr32_comparator",
-       .features       = CLOCK_EVT_FEAT_ONESHOT,
-       .shift          = 16,
-       .rating         = 50,
-       .set_next_event = comparator_next_event,
-       .set_mode       = comparator_mode,
+       .name                   = "avr32_comparator",
+       .features               = CLOCK_EVT_FEAT_ONESHOT,
+       .shift                  = 16,
+       .rating                 = 50,
+       .set_next_event         = comparator_next_event,
+       .set_state_shutdown     = comparator_shutdown,
+       .set_state_oneshot      = comparator_set_oneshot,
+       .tick_resume            = comparator_set_oneshot,
 };
 
 void read_persistent_clock(struct timespec *ts)
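
This avr32 hunk is part of the tree-wide conversion from a single set_mode(enum clock_event_mode) callback to per-state callbacks: each mode case becomes its own function, and resume reuses the one-shot setup. The skeleton of the pattern, with illustrative names and the hardware programming elided:

    #include <linux/clockchips.h>

    static int example_shutdown(struct clock_event_device *evdev)
    {
            /* stop the hardware; undo anything set_oneshot forced */
            return 0;
    }

    static int example_set_oneshot(struct clock_event_device *evdev)
    {
            /* prepare the hardware for one-shot operation */
            return 0;
    }

    static struct clock_event_device example_evdev = {
            .name               = "example",
            .features           = CLOCK_EVT_FEAT_ONESHOT,
            .set_state_shutdown = example_shutdown,
            .set_state_oneshot  = example_set_oneshot,
            .tick_resume        = example_set_oneshot,
            /* .rating, .set_next_event, etc. omitted for brevity */
    };
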
index 23b1a97fae7ad686acc581c5888e95d5e4928a1e..52c179bec0cc6c69d4430324a0a581f66e066a6d 100644 (file)
@@ -80,6 +80,9 @@ int clk_enable(struct clk *clk)
 {
        unsigned long flags;
 
+       if (!clk)
+               return 0;
+
        spin_lock_irqsave(&clk_lock, flags);
        __clk_enable(clk);
        spin_unlock_irqrestore(&clk_lock, flags);
@@ -106,6 +109,9 @@ void clk_disable(struct clk *clk)
 {
        unsigned long flags;
 
+       if (IS_ERR_OR_NULL(clk))
+               return;
+
        spin_lock_irqsave(&clk_lock, flags);
        __clk_disable(clk);
        spin_unlock_irqrestore(&clk_lock, flags);
@@ -117,6 +123,9 @@ unsigned long clk_get_rate(struct clk *clk)
        unsigned long flags;
        unsigned long rate;
 
+       if (!clk)
+               return 0;
+
        spin_lock_irqsave(&clk_lock, flags);
        rate = clk->get_rate(clk);
        spin_unlock_irqrestore(&clk_lock, flags);
@@ -129,6 +138,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
 {
        unsigned long flags, actual_rate;
 
+       if (!clk)
+               return 0;
+
        if (!clk->set_rate)
                return -ENOSYS;
 
@@ -145,6 +157,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
        unsigned long flags;
        long ret;
 
+       if (!clk)
+               return 0;
+
        if (!clk->set_rate)
                return -ENOSYS;
 
@@ -161,6 +176,9 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
        unsigned long flags;
        int ret;
 
+       if (!clk)
+               return 0;
+
        if (!clk->set_parent)
                return -ENOSYS;
 
@@ -174,7 +192,7 @@ EXPORT_SYMBOL(clk_set_parent);
 
 struct clk *clk_get_parent(struct clk *clk)
 {
-       return clk->parent;
+       return !clk ? NULL : clk->parent;
 }
 EXPORT_SYMBOL(clk_get_parent);
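
The NULL and IS_ERR_OR_NULL checks added above bring avr32's private clk implementation in line with the common convention that a NULL struct clk * is a valid dummy clock. Consumers can then handle optional clocks uniformly; a hedged sketch:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static struct clk *example_get_optional_clk(struct device *dev,
                                                const char *id)
    {
            struct clk *clk = clk_get(dev, id);

            if (IS_ERR(clk))
                    return NULL;    /* clk_enable(NULL) is now a no-op */
            return clk;
    }
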
 
index 07051a63415d712bcd17469d6db5460c1353030e..61cd1e786a142c440caa231a665349ed3d8f8e01 100644 (file)
@@ -21,6 +21,7 @@ generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += mman.h
 generic-y += msgbuf.h
 generic-y += mutex.h
diff --git a/arch/blackfin/include/asm/mm-arch-hooks.h b/arch/blackfin/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index 1c5211e..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_BLACKFIN_MM_ARCH_HOOKS_H
-#define _ASM_BLACKFIN_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_BLACKFIN_MM_ARCH_HOOKS_H */
index 7aeb322729754f25a991a6358e1e5b4a9ad39118..f17c4dc6050c7d23ade635f749aa75eed8f0ac1d 100644 (file)
@@ -26,6 +26,7 @@ generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += local.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += mman.h
 generic-y += mmu.h
 generic-y += mmu_context.h
diff --git a/arch/c6x/include/asm/mm-arch-hooks.h b/arch/c6x/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index bb3c4a6..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_C6X_MM_ARCH_HOOKS_H
-#define _ASM_C6X_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_C6X_MM_ARCH_HOOKS_H */
index d294f6aaff1d1631dc085e1e174bc8515e59b5cb..ad2244f35bca0c991e8c42702edb613dc8aa5334 100644 (file)
@@ -18,6 +18,7 @@ generic-y += linkage.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += module.h
 generic-y += percpu.h
 generic-y += preempt.h
diff --git a/arch/cris/include/asm/mm-arch-hooks.h b/arch/cris/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index 314f774..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_CRIS_MM_ARCH_HOOKS_H
-#define _ASM_CRIS_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_CRIS_MM_ARCH_HOOKS_H */
index 30edce31e5c21e1e7cba707833f10a49d12eb42c..8e47b832cc7684af7a10d2a0680651cf5bf8cc0d 100644 (file)
@@ -4,5 +4,6 @@ generic-y += cputime.h
 generic-y += exec.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += trace_clock.h
diff --git a/arch/frv/include/asm/mm-arch-hooks.h b/arch/frv/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index 51d13a8..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_FRV_MM_ARCH_HOOKS_H
-#define _ASM_FRV_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_FRV_MM_ARCH_HOOKS_H */
index 00379d64f707860c0101d6edb6be5f3aa3b9c35f..70e6ae1e700673e3acbd03452d22f57db9c1166d 100644 (file)
@@ -33,6 +33,7 @@ generic-y += linkage.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += mman.h
 generic-y += mmu.h
 generic-y += mmu_context.h
index 5ade4a163558f924d85dfa5ec058bd4ba8ab1379..daee37bd09991a3edf732451587e59a8db001d54 100644 (file)
@@ -28,6 +28,7 @@ generic-y += kmap_types.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += mman.h
 generic-y += msgbuf.h
 generic-y += pci.h
diff --git a/arch/hexagon/include/asm/mm-arch-hooks.h b/arch/hexagon/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index 05e8b93..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_HEXAGON_MM_ARCH_HOOKS_H
-#define _ASM_HEXAGON_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_HEXAGON_MM_ARCH_HOOKS_H */
index ccff13d33fa21e89e31570a35b33f36c2ec11ad7..9de3ba12f6b97c0f8722e2f929c70fbae429fd34 100644 (file)
@@ -4,6 +4,7 @@ generic-y += exec.h
 generic-y += irq_work.h
 generic-y += kvm_para.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += trace_clock.h
 generic-y += vtime.h
diff --git a/arch/ia64/include/asm/mm-arch-hooks.h b/arch/ia64/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index ab4b5c6..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_IA64_MM_ARCH_HOOKS_H
-#define _ASM_IA64_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_IA64_MM_ARCH_HOOKS_H */
index ba1cdc0187315113e5a79850ea8b6dc08038800c..e0eb704ca1fa93755d678e82c2336c8e188a34f0 100644 (file)
@@ -4,6 +4,7 @@ generic-y += cputime.h
 generic-y += exec.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += module.h
 generic-y += preempt.h
 generic-y += sections.h
index 0c3f25ee3381d9fad606ea36bb09682d362a87d8..f8de767ce2bcc763979ef6e08d4ef4f862a89efb 100644 (file)
@@ -174,6 +174,11 @@ static inline void _writel(unsigned long l, unsigned long addr)
 #define iowrite16 writew
 #define iowrite32 writel
 
+#define ioread16be(addr)       be16_to_cpu(readw(addr))
+#define ioread32be(addr)       be32_to_cpu(readl(addr))
+#define iowrite16be(v, addr)   writew(cpu_to_be16(v), (addr))
+#define iowrite32be(v, addr)   writel(cpu_to_be32(v), (addr))
+
 #define mmiowb()
 
 #define flush_write_buffers() do { } while (0)  /* M32R_FIXME */
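
The new definitions give m32r the byte-order-aware MMIO accessors that drivers for devices with big-endian register layouts expect. Usage sketch:

    #include <linux/io.h>

    static u32 example_read_be_reg(void __iomem *base, unsigned long off)
    {
            /* byte-swapped as needed; returns the value in CPU order */
            return ioread32be(base + off);
    }
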
diff --git a/arch/m32r/include/asm/mm-arch-hooks.h b/arch/m32r/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index 6d60b47..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_M32R_MM_ARCH_HOOKS_H
-#define _ASM_M32R_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_M32R_MM_ARCH_HOOKS_H */
index 33013dfcd3e1d58fb0b0f3495ce1956c76233340..c496d48a8c8d07f9c27cb2bf799a3f078b5c1674 100644 (file)
@@ -125,6 +125,13 @@ endif # M68KCLASSIC
 
 if COLDFIRE
 
+choice
+       prompt "ColdFire SoC type"
+       default M520x
+       help
+         Select the type of ColdFire System-on-Chip (SoC) that you want
+         to build for.
+
 config M5206
        bool "MCF5206"
        depends on !MMU
@@ -174,9 +181,6 @@ config M525x
        help
          Freescale (Motorola) Coldfire 5251/5253 processor support.
 
-config M527x
-       bool
-
 config M5271
        bool "MCF5271"
        depends on !MMU
@@ -223,9 +227,6 @@ config M5307
        help
          Motorola ColdFire 5307 processor support.
 
-config M53xx
-       bool
-
 config M532x
        bool "MCF532x"
        depends on !MMU
@@ -251,9 +252,6 @@ config M5407
        help
          Motorola ColdFire 5407 processor support.
 
-config M54xx
-       bool
-
 config M547x
        bool "MCF547x"
        select M54xx
@@ -280,6 +278,17 @@ config M5441x
        help
          Freescale Coldfire 54410/54415/54416/54417/54418 processor support.
 
+endchoice
+
+config M527x
+       bool
+
+config M53xx
+       bool
+
+config M54xx
+       bool
+
 endif # COLDFIRE
 
 
@@ -416,22 +425,18 @@ config HAVE_MBAR
 config HAVE_IPSBAR
        bool
 
-config CLOCK_SET
-       bool "Enable setting the CPU clock frequency"
-       depends on COLDFIRE
-       default n
-       help
-         On some CPU's you do not need to know what the core CPU clock
-         frequency is. On these you can disable clock setting. On some
-         traditional 68K parts, and on all ColdFire parts you need to set
-         the appropriate CPU clock frequency. On these devices many of the
-         onboard peripherals derive their timing from the master CPU clock
-         frequency.
-
 config CLOCK_FREQ
        int "Set the core clock frequency"
+       default "25000000" if M5206
+       default "54000000" if M5206e
+       default "166666666" if M520x
+       default "140000000" if M5249
+       default "150000000" if M527x || M523x
+       default "90000000" if M5307
+       default "50000000" if M5407
+       default "266000000" if M54xx
        default "66666666"
-       depends on CLOCK_SET
+       depends on COLDFIRE
        help
          Define the CPU clock frequency in use. This is the core clock
          frequency; it may or may not be the same as the external clock
index e7292f460af45665ac7cdbef46c02a56f4b07921..4c7b7938d53af01eb5a154b9f916c35c8ba72974 100644 (file)
@@ -1,10 +1,6 @@
-# CONFIG_MMU is not set
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
 # CONFIG_SIGNALFD is not set
@@ -16,17 +12,12 @@ CONFIG_EXPERT=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
-CONFIG_M520x=y
-CONFIG_CLOCK_SET=y
-CONFIG_CLOCK_FREQ=166666666
-CONFIG_CLOCK_DIV=2
-CONFIG_M5208EVB=y
+# CONFIG_MMU is not set
 # CONFIG_4KSTACKS is not set
 CONFIG_RAMBASE=0x40000000
 CONFIG_RAMSIZE=0x2000000
 CONFIG_VECTORBASE=0x40000000
 CONFIG_KERNELBASE=0x40020000
-CONFIG_RAM16BIT=y
 CONFIG_BINFMT_FLAT=y
 CONFIG_NET=y
 CONFIG_PACKET=y
@@ -40,24 +31,19 @@ CONFIG_INET=y
 # CONFIG_IPV6 is not set
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_RAM=y
 CONFIG_MTD_UCLINUX=y
 CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
 CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
 CONFIG_FEC=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
+# CONFIG_UNIX98_PTYS is not set
 CONFIG_SERIAL_MCF=y
 CONFIG_SERIAL_MCF_BAUDRATE=115200
 CONFIG_SERIAL_MCF_CONSOLE=y
-# CONFIG_UNIX98_PTYS is not set
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
 # CONFIG_USB_SUPPORT is not set
@@ -68,8 +54,6 @@ CONFIG_EXT2_FS=y
 CONFIG_ROMFS_FS=y
 CONFIG_ROMFS_BACKED_BY_MTD=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_FULLDEBUG=y
 CONFIG_BOOTPARAM=y
 CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
+CONFIG_FULLDEBUG=y
index 0cd4b39f325b6c609a385f31de4cf4c76db2a957..a782f368650fe9597625560bfc39663094350f80 100644 (file)
@@ -1,10 +1,6 @@
-# CONFIG_MMU is not set
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
 # CONFIG_SIGNALFD is not set
@@ -16,10 +12,8 @@ CONFIG_EXPERT=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
+# CONFIG_MMU is not set
 CONFIG_M5249=y
-CONFIG_CLOCK_SET=y
-CONFIG_CLOCK_FREQ=140000000
-CONFIG_CLOCK_DIV=2
 CONFIG_M5249C3=y
 CONFIG_RAMBASE=0x00000000
 CONFIG_RAMSIZE=0x00800000
@@ -38,23 +32,18 @@ CONFIG_INET=y
 # CONFIG_IPV6 is not set
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_RAM=y
 CONFIG_MTD_UCLINUX=y
 CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
 CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 CONFIG_PPP=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
+# CONFIG_UNIX98_PTYS is not set
 CONFIG_SERIAL_MCF=y
 CONFIG_SERIAL_MCF_CONSOLE=y
-# CONFIG_UNIX98_PTYS is not set
 # CONFIG_HWMON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_EXT2_FS=y
@@ -62,7 +51,5 @@ CONFIG_EXT2_FS=y
 CONFIG_ROMFS_FS=y
 CONFIG_ROMFS_BACKED_BY_MTD=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_BOOTPARAM=y
 CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
-# CONFIG_CRC32 is not set
index a60cb35091352d20399c39fbfe1cd863b072a5b8..6f5fb92f5cbf2e64c907d18b1eab7322c67f5dbd 100644 (file)
@@ -1,10 +1,6 @@
-# CONFIG_MMU is not set
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
 # CONFIG_SIGNALFD is not set
@@ -16,8 +12,8 @@ CONFIG_EXPERT=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
+# CONFIG_MMU is not set
 CONFIG_M5272=y
-CONFIG_CLOCK_SET=y
 CONFIG_M5272C3=y
 CONFIG_RAMBASE=0x00000000
 CONFIG_RAMSIZE=0x00800000
@@ -36,23 +32,18 @@ CONFIG_INET=y
 # CONFIG_IPV6 is not set
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_RAM=y
 CONFIG_MTD_UCLINUX=y
 CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
 CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
 CONFIG_FEC=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
+# CONFIG_UNIX98_PTYS is not set
 CONFIG_SERIAL_MCF=y
 CONFIG_SERIAL_MCF_CONSOLE=y
-# CONFIG_UNIX98_PTYS is not set
 # CONFIG_HWMON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_EXT2_FS=y
@@ -61,6 +52,5 @@ CONFIG_EXT2_FS=y
 CONFIG_ROMFS_FS=y
 CONFIG_ROMFS_BACKED_BY_MTD=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_BOOTPARAM=y
 CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
index e6502ab7cb2fee0ac7b2bd7a1089e41450f05780..b5d7cd1ce8562359f1ef52290f04728b053b2935 100644 (file)
@@ -1,10 +1,6 @@
-# CONFIG_MMU is not set
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
 # CONFIG_SIGNALFD is not set
@@ -16,11 +12,8 @@ CONFIG_EXPERT=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
+# CONFIG_MMU is not set
 CONFIG_M5275=y
-CONFIG_CLOCK_SET=y
-CONFIG_CLOCK_FREQ=150000000
-CONFIG_CLOCK_DIV=2
-CONFIG_M5275EVB=y
 # CONFIG_4KSTACKS is not set
 CONFIG_RAMBASE=0x00000000
 CONFIG_RAMSIZE=0x00000000
@@ -39,24 +32,19 @@ CONFIG_INET=y
 # CONFIG_IPV6 is not set
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_RAM=y
 CONFIG_MTD_UCLINUX=y
 CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
 CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
 CONFIG_FEC=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 CONFIG_PPP=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
+# CONFIG_UNIX98_PTYS is not set
 CONFIG_SERIAL_MCF=y
 CONFIG_SERIAL_MCF_CONSOLE=y
-# CONFIG_UNIX98_PTYS is not set
 # CONFIG_HWMON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_EXT2_FS=y
@@ -65,8 +53,5 @@ CONFIG_EXT2_FS=y
 CONFIG_ROMFS_FS=y
 CONFIG_ROMFS_BACKED_BY_MTD=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_BOOTPARAM=y
 CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
-# CONFIG_CRC32 is not set
index 023812abd2e6c6d701015dd99f70693dd1eb7251..1b4c09461c409c195e1966012fd4bf1461f34ab6 100644 (file)
@@ -1,10 +1,6 @@
-# CONFIG_MMU is not set
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
 # CONFIG_SIGNALFD is not set
@@ -16,10 +12,8 @@ CONFIG_EXPERT=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
+# CONFIG_MMU is not set
 CONFIG_M5307=y
-CONFIG_CLOCK_SET=y
-CONFIG_CLOCK_FREQ=90000000
-CONFIG_CLOCK_DIV=2
 CONFIG_M5307C3=y
 CONFIG_RAMBASE=0x00000000
 CONFIG_RAMSIZE=0x00800000
@@ -38,16 +32,11 @@ CONFIG_INET=y
 # CONFIG_IPV6 is not set
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_RAM=y
 CONFIG_MTD_UCLINUX=y
 CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
 CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 CONFIG_PPP=y
 CONFIG_SLIP=y
 CONFIG_SLIP_COMPRESSED=y
@@ -56,21 +45,17 @@ CONFIG_SLIP_COMPRESSED=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_MCF=y
 CONFIG_SERIAL_MCF_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
-# CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_EXT2_FS=y
 # CONFIG_DNOTIFY is not set
 CONFIG_ROMFS_FS=y
 CONFIG_ROMFS_BACKED_BY_MTD=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_FULLDEBUG=y
 CONFIG_BOOTPARAM=y
 CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
-# CONFIG_CRC32 is not set
+CONFIG_FULLDEBUG=y
index 557b39f3be9094e223fb8715cae927c0c5ba1cd3..275ad543d4bcbe0223a6837a7e093a488749a2f5 100644 (file)
@@ -1,10 +1,6 @@
-# CONFIG_MMU is not set
-CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
 # CONFIG_SIGNALFD is not set
@@ -17,9 +13,8 @@ CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
+# CONFIG_MMU is not set
 CONFIG_M5407=y
-CONFIG_CLOCK_SET=y
-CONFIG_CLOCK_FREQ=50000000
 CONFIG_M5407C3=y
 CONFIG_RAMBASE=0x00000000
 CONFIG_RAMSIZE=0x00000000
@@ -38,22 +33,17 @@ CONFIG_INET=y
 # CONFIG_IPV6 is not set
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_RAM=y
 CONFIG_MTD_UCLINUX=y
 CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
 CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
 CONFIG_PPP=y
 # CONFIG_INPUT is not set
 # CONFIG_VT is not set
+# CONFIG_UNIX98_PTYS is not set
 CONFIG_SERIAL_MCF=y
 CONFIG_SERIAL_MCF_CONSOLE=y
-# CONFIG_UNIX98_PTYS is not set
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
 # CONFIG_USB_SUPPORT is not set
@@ -63,8 +53,5 @@ CONFIG_EXT2_FS=y
 CONFIG_ROMFS_FS=y
 CONFIG_ROMFS_BACKED_BY_MTD=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_BOOTPARAM=y
 CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
-# CONFIG_CRC32 is not set
index c5018a68819b5462b67544ee99606e8d4d333b4d..4f4ccd13c11bb164c1661d720b21d60c5226da6a 100644 (file)
@@ -1,11 +1,7 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_SWAP is not set
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED=y
-CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_SYSCTL_SYSCALL=y
 # CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
 # CONFIG_SIGNALFD is not set
@@ -20,19 +16,16 @@ CONFIG_MODULES=y
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
 CONFIG_COLDFIRE=y
-CONFIG_M547x=y
-CONFIG_CLOCK_SET=y
-CONFIG_CLOCK_FREQ=266000000
 # CONFIG_4KSTACKS is not set
 CONFIG_RAMBASE=0x0
 CONFIG_RAMSIZE=0x2000000
 CONFIG_VECTORBASE=0x0
 CONFIG_MBAR=0xff000000
 CONFIG_KERNELBASE=0x20000
+CONFIG_PCI=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_JEDECPROBE=y
index 1555bc189c7d65cfd8722e03dea2cd03d65109f4..eb85bd9c6180579bf119263bdce899045d10d375 100644 (file)
@@ -18,6 +18,7 @@ generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += mman.h
 generic-y += mutex.h
 generic-y += percpu.h
index c94557b914482dfa7d272eaf73e9aa0951c7354d..50aa4dac9ca28ba444ced005a251e9de54bfbd7d 100644 (file)
@@ -19,7 +19,7 @@
  *     in any case new boards come along from time to time that have yet
  *     another different clocking frequency.
  */
-#ifdef CONFIG_CLOCK_SET
+#ifdef CONFIG_CLOCK_FREQ
 #define        MCF_CLK         CONFIG_CLOCK_FREQ
 #else
 #error "Don't know what your ColdFire CPU clock frequency is??"
index 618c85d3c786713c8787043f176eac5d74e13e5e..f55cad529400f65eb38832cce8c96760a44f1aad 100644 (file)
@@ -413,7 +413,8 @@ static inline void isa_delay(void)
 #define writew(val, addr)      out_le16((addr), (val))
 #endif /* CONFIG_ATARI_ROM_ISA */
 
-#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA)
+#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA) && \
+    !(defined(CONFIG_PCI) && defined(CONFIG_COLDFIRE))
 /*
  * We need to define dummy functions for GENERIC_IOMAP support.
  */
diff --git a/arch/m68k/include/asm/mm-arch-hooks.h b/arch/m68k/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index 7e8709b..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_M68K_MM_ARCH_HOOKS_H
-#define _ASM_M68K_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_M68K_MM_ARCH_HOOKS_H */
index 199320f3c34528c09b392c391e2bcc0874d1f1d5..df31353fd2001dc0e357feafcf975aa6e5537622 100644 (file)
@@ -25,6 +25,7 @@ generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += msgbuf.h
 generic-y += mutex.h
 generic-y += param.h
diff --git a/arch/metag/include/asm/mm-arch-hooks.h b/arch/metag/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index b0072b2..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_METAG_MM_ARCH_HOOKS_H
-#define _ASM_METAG_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_METAG_MM_ARCH_HOOKS_H */
index 9989ddb169cab99c25433f2586f184015568a1ef..2f222f355c4bbc69842ccd62f3419b0cbd1732a4 100644 (file)
@@ -6,6 +6,7 @@ generic-y += device.h
 generic-y += exec.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += syscalls.h
 generic-y += trace_clock.h
diff --git a/arch/microblaze/include/asm/mm-arch-hooks.h b/arch/microblaze/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index 5c40659..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_MICROBLAZE_MM_ARCH_HOOKS_H
-#define _ASM_MICROBLAZE_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_MICROBLAZE_MM_ARCH_HOOKS_H */
index aab7e46cadd5d843f3f7252a494fa9b0ef9af0c2..cee5f93e5712f3120d36847dc02d74709bc21929 100644 (file)
@@ -1427,6 +1427,7 @@ config CPU_MIPS64_R6
        select CPU_SUPPORTS_HIGHMEM
        select CPU_SUPPORTS_MSA
        select GENERIC_CSUM
+       select MIPS_O32_FP64_SUPPORT if MIPS32_O32
        help
          Choose this option to build a kernel for release 6 or later of the
          MIPS64 architecture.  New MIPS processors, starting with the Warrior
@@ -2262,11 +2263,6 @@ config MIPS_CM
 config MIPS_CPC
        bool
 
-config SB1_PASS_1_WORKAROUNDS
-       bool
-       depends on CPU_SB1_PASS_1
-       default y
-
 config SB1_PASS_2_WORKAROUNDS
        bool
        depends on CPU_SB1 && (CPU_SB1_PASS_2_2 || CPU_SB1_PASS_2)
index ae2dd59050f742c54c49b3f80925bbfc4e67fbbd..252e347958f383354a597323078512846f592d52 100644 (file)
@@ -181,13 +181,6 @@ cflags-$(CONFIG_CPU_R4000_WORKAROUNDS)     += $(call cc-option,-mfix-r4000,)
 cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,)
 cflags-$(CONFIG_CPU_DADDI_WORKAROUNDS) += $(call cc-option,-mno-daddi,)
 
-ifdef CONFIG_CPU_SB1
-ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
-KBUILD_AFLAGS_MODULE += -msb1-pass1-workarounds
-KBUILD_CFLAGS_MODULE += -msb1-pass1-workarounds
-endif
-endif
-
 # For smartmips configurations, there are hundreds of warnings due to ISA overrides
 # in assembly and header files. smartmips is only supported for MIPS32r1 onwards
 # and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or
index 7fe5c61a3cb83a0399bcd5e2745641f45eb08763..1f8546081d2068f3adaef34892354b9e6fb2119f 100644 (file)
@@ -7,6 +7,7 @@ generic-y += emergency-restart.h
 generic-y += irq_work.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += mutex.h
 generic-y += parport.h
 generic-y += percpu.h
index 084780b355aa51ee00fff050f49dab33b07d0f5d..1b062518983525f0065841c628b4ac10b047aeb2 100644 (file)
@@ -74,7 +74,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
                goto fr_common;
 
        case FPU_64BIT:
-#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \
+#if !(defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) \
       || defined(CONFIG_64BIT))
                /* we only have a 32-bit FPU */
                return SIGFPE;
index 0a227d426b9cbf5d284e8d523eba1939fd4d9294..520f8fc2c8067c7e3b77cd7cd03bd39a83586c11 100644 (file)
@@ -13,8 +13,7 @@
 #define R4600_V2_HIT_CACHEOP_WAR       0
 #define R5432_CP0_INTERRUPT_WAR                0
 
-#if defined(CONFIG_SB1_PASS_1_WORKAROUNDS) || \
-    defined(CONFIG_SB1_PASS_2_WORKAROUNDS)
+#if defined(CONFIG_SB1_PASS_2_WORKAROUNDS)
 
 #ifndef __ASSEMBLY__
 extern int sb1250_m3_workaround_needed(void);
diff --git a/arch/mips/include/asm/mm-arch-hooks.h b/arch/mips/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index b5609fe..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_MIPS_MM_ARCH_HOOKS_H
-#define _ASM_MIPS_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_MIPS_MM_ARCH_HOOKS_H */
index 6c9906f59c6e69f4a6d778064204c8764653b12a..9081d88ae44f31a1bf0c2e4d5c4ae1bdc0e487db 100644 (file)
@@ -16,7 +16,7 @@
 
 /*
  * Keep this struct definition in sync with the sigcontext fragment
- * in arch/mips/tools/offset.c
+ * in arch/mips/kernel/asm-offsets.c
  */
 struct sigcontext {
        unsigned int            sc_regmask;     /* Unused */
@@ -46,7 +46,7 @@ struct sigcontext {
 #include <linux/posix_types.h>
 /*
  * Keep this struct definition in sync with the sigcontext fragment
- * in arch/mips/tools/offset.c
+ * in arch/mips/kernel/asm-offsets.c
  *
  * Warning: this structure was ill-defined, with sc_badvaddr being just an unsigned
  * int so it was changed to unsigned long in 2.6.0-test1.  This may break
index beabe19ff8e5c963af0cf38147a3f4b7af64efca..072fab13645d503a3b71381fec8c5976f570a905 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * offset.c: Calculate pt_regs and task_struct offsets.
+ * asm-offsets.c: Calculate pt_regs and task_struct offsets.
  *
  * Copyright (C) 1996 David S. Miller
  * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle
index a8bb972fd9fddfee74c791b750707970341861fc..cb9a095f5c5ec082188d21b96619c4fcb5fda7a0 100644 (file)
@@ -81,11 +81,6 @@ choice
        prompt "SiByte SOC Stepping"
        depends on SIBYTE_SB1xxx_SOC
 
-config CPU_SB1_PASS_1
-       bool "1250 Pass1"
-       depends on SIBYTE_SB1250
-       select CPU_HAS_PREFETCH
-
 config CPU_SB1_PASS_2_1250
        bool "1250 An"
        depends on SIBYTE_SB1250
index 5581844c9194bc85719554789b58f1121c711eca..41a1d22422112caa5b08cbc51b66ca47cf267a72 100644 (file)
@@ -81,10 +81,7 @@ void check_bus_watcher(void)
 {
        u32 status, l2_err, memio_err;
 
-#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
-       /* Destructive read, clears register and interrupt */
-       status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS));
-#elif defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250)
+#if defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250)
        /* Use non-destructive register */
        status = csr_in32(IOADDR(A_SCD_BUS_ERR_STATUS_DEBUG));
 #elif defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
index 3c02b2a77ae996b0d84ebe085fe697d6879cb144..9d3c24efdf4a338aeb83fe16566ae5045e506e33 100644 (file)
@@ -202,12 +202,10 @@ void __init sb1250_setup(void)
 
        switch (war_pass) {
        case K_SYS_REVISION_BCM1250_PASS1:
-#ifndef CONFIG_SB1_PASS_1_WORKAROUNDS
                printk("@@@@ This is a BCM1250 A0-A2 (Pass 1) board, "
                            "and the kernel doesn't have the proper "
                            "workarounds compiled in. @@@@\n");
                bad_config = 1;
-#endif
                break;
        case K_SYS_REVISION_BCM1250_PASS2:
                /* Pass 2 - easiest as default for now - so many numbers */
index de30b0c887968c4b985c3c3ab4f9facd082905f0..6edb9ee6128ebc4d45de622afdf118f0bf5d12ee 100644 (file)
@@ -5,6 +5,7 @@ generic-y += cputime.h
 generic-y += exec.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += sections.h
 generic-y += trace_clock.h
diff --git a/arch/mn10300/include/asm/mm-arch-hooks.h b/arch/mn10300/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index e2029a6..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_MN10300_MM_ARCH_HOOKS_H
-#define _ASM_MN10300_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_MN10300_MM_ARCH_HOOKS_H */
index 434639d510b3cba669461ca75303eab66782a15d..914864eb5a25daf87688f1565b33700b289be0bf 100644 (file)
@@ -30,6 +30,7 @@ generic-y += kmap_types.h
 generic-y += kvm_para.h
 generic-y += local.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += mman.h
 generic-y += module.h
 generic-y += msgbuf.h
diff --git a/arch/nios2/include/asm/mm-arch-hooks.h b/arch/nios2/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index d7290dc..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_NIOS2_MM_ARCH_HOOKS_H
-#define _ASM_NIOS2_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_NIOS2_MM_ARCH_HOOKS_H */
index e5a693b16da2967ffc112acf0c7561d0e93d5e04..443f44de102093f18fe5d797b297757705a0b70c 100644 (file)
@@ -17,6 +17,7 @@ config OPENRISC
        select GENERIC_IRQ_SHOW
        select GENERIC_IOMAP
        select GENERIC_CPU_DEVICES
+       select HAVE_UID16
        select GENERIC_ATOMIC64
        select GENERIC_CLOCKEVENTS
        select GENERIC_STRNCPY_FROM_USER
@@ -31,9 +32,6 @@ config MMU
 config HAVE_DMA_ATTRS
        def_bool y
 
-config UID16
-       def_bool y
-
 config RWSEM_GENERIC_SPINLOCK
        def_bool y
 
index 2a2e39b8109abd5b503fc4ac3d6ce3e39fd615c9..2832f031fb11d0e86fe695ced404c41cf36ebf90 100644 (file)
@@ -36,6 +36,7 @@ generic-y += kmap_types.h
 generic-y += kvm_para.h
 generic-y += local.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += mman.h
 generic-y += module.h
 generic-y += msgbuf.h
diff --git a/arch/openrisc/include/asm/mm-arch-hooks.h b/arch/openrisc/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index 6d33cb5..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_OPENRISC_MM_ARCH_HOOKS_H
-#define _ASM_OPENRISC_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_OPENRISC_MM_ARCH_HOOKS_H */
index 12b341d04f88d09be325e318967f537153602d86..f9b3a81aefcdcb5e26cdeaa97026ffdf7622b3f8 100644 (file)
@@ -15,6 +15,7 @@ generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += mutex.h
 generic-y += param.h
 generic-y += percpu.h
diff --git a/arch/parisc/include/asm/mm-arch-hooks.h b/arch/parisc/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index 654ec63..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_PARISC_MM_ARCH_HOOKS_H
-#define _ASM_PARISC_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_PARISC_MM_ARCH_HOOKS_H */
index 3a08eae3318fe8da341a563613ae16e82e64912b..3edbb9fc91b4e055307d3c7519592a00201d988a 100644 (file)
@@ -72,7 +72,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-       if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
+       if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
                /*
                 * This is the permanent pmd attached to the pgd;
                 * cannot free it.
@@ -81,6 +81,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
                 */
                mm_inc_nr_pmds(mm);
                return;
+       }
        free_pages((unsigned long)pmd, PMD_ORDER);
 }
 
index dc5385ebb071ac79c08b50c7a4c85b2b0e66f32a..5ad26dd94d77e83fedeba5c7f71c8eba0ff2ab29 100644 (file)
@@ -3,5 +3,6 @@
 generic-y += clkdev.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += trace_clock.h
index cfad7fca01d61d72942d98a37d18e5e0675980bc..d7697ab802f6c94813a27394baa255fa26a93ddc 100644 (file)
@@ -57,7 +57,10 @@ union ctlreg0 {
                unsigned long lap  : 1; /* Low-address-protection control */
                unsigned long      : 4;
                unsigned long edat : 1; /* Enhanced-DAT-enablement control */
-               unsigned long      : 23;
+               unsigned long      : 4;
+               unsigned long afp  : 1; /* AFP-register control */
+               unsigned long vx   : 1; /* Vector enablement control */
+               unsigned long      : 17;
        };
 };
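
The two new bits take over previously reserved space below the EDAT bit (CR0 bits 45 and 46). A minimal sketch of how the union is used, mirroring the machine-check revalidation change further down in this diff — not a new API:

        union ctlreg0 cr0;

        cr0.val = S390_lowcore.cregs_save_area[0];  /* saved CR0 image */
        cr0.afp = cr0.vx = 1;                       /* enable AFP and vector regs */
        __ctl_load(cr0.val, 0, 0);                  /* reload control register 0 */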
 
index 0130d0379edd179e8b06dbd459066b8606517676..d9be7c0c1291445cfb2356d88b973398fea67b97 100644 (file)
@@ -14,6 +14,7 @@
 
 #define is_hugepage_only_range(mm, addr, len)  0
 #define hugetlb_free_pgd_range                 free_pgd_range
+#define hugepages_supported()                  (MACHINE_HAS_HPAGE)
 
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte);
diff --git a/arch/s390/include/asm/mm-arch-hooks.h b/arch/s390/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index 07680b2..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_S390_MM_ARCH_HOOKS_H
-#define _ASM_S390_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_S390_MM_ARCH_HOOKS_H */
index dd345238d9a7762becd9276602122d00b9e87ea5..53eacbd4f09bf4c32ac051f1be7f9d3e5445e0df 100644 (file)
 #define PAGE_DEFAULT_ACC       0
 #define PAGE_DEFAULT_KEY       (PAGE_DEFAULT_ACC << 4)
 
-#include <asm/setup.h>
-#ifndef __ASSEMBLY__
-
-extern int HPAGE_SHIFT;
+#define HPAGE_SHIFT    20
 #define HPAGE_SIZE     (1UL << HPAGE_SHIFT)
 #define HPAGE_MASK     (~(HPAGE_SIZE - 1))
 #define HUGETLB_PAGE_ORDER     (HPAGE_SHIFT - PAGE_SHIFT)
@@ -30,6 +27,9 @@ extern int HPAGE_SHIFT;
 #define ARCH_HAS_PREPARE_HUGEPAGE
 #define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH
 
+#include <asm/setup.h>
+#ifndef __ASSEMBLY__
+
 static inline void storage_key_init_range(unsigned long start, unsigned long end)
 {
 #if PAGE_DEFAULT_KEY
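
With HPAGE_SHIFT demoted from a runtime variable to a constant (runtime availability is now expressed by the hugepages_supported() macro added to hugetlb.h earlier in this diff), the derived values are fixed at build time. A self-contained sketch of the arithmetic, assuming the s390 base PAGE_SHIFT of 12:

        #include <assert.h>

        #define PAGE_SHIFT              12      /* 4 KiB base pages */
        #define HPAGE_SHIFT             20      /* now a compile-time constant */
        #define HPAGE_SIZE              (1UL << HPAGE_SHIFT)
        #define HUGETLB_PAGE_ORDER      (HPAGE_SHIFT - PAGE_SHIFT)

        int main(void)
        {
                assert(HPAGE_SIZE == 1UL << 20);        /* 1 MiB huge pages */
                assert(HUGETLB_PAGE_ORDER == 8);        /* 256 base pages each */
                return 0;
        }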
index 4cb19fe76dd98b13abfc02c9520e37460475e645..f897ec73dc8c9c02943a5c76254140bc951a6024 100644 (file)
@@ -87,7 +87,15 @@ struct sf_raw_sample {
 } __packed;
 
 /* Perf hardware reserve and release functions */
+#ifdef CONFIG_PERF_EVENTS
 int perf_reserve_sampling(void);
 void perf_release_sampling(void);
+#else /* CONFIG_PERF_EVENTS */
+static inline int perf_reserve_sampling(void)
+{
+       return 0;
+}
+static inline void perf_release_sampling(void) {}
+#endif /* CONFIG_PERF_EVENTS */
 
 #endif /* _ASM_S390_PERF_EVENT_H */
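
The stubs make perf_reserve_sampling()/perf_release_sampling() safe to call with CONFIG_PERF_EVENTS disabled; the oprofile change further down in this diff adds the include that relies on this. A hedged sketch of such a caller (hwsampler_start is a hypothetical name):

        #include <asm/perf_event.h>

        static int hwsampler_start(void)
        {
                int err;

                err = perf_reserve_sampling();  /* stub returns 0 when perf is off */
                if (err)
                        return err;
                /* ... program the CPU-measurement sampling facility ... */
                return 0;
        }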
index c7d1b9d0901147ba359fa03e2e4e026699413fa2..a2da259d932741614c4f6b78642c491b3829b223 100644 (file)
 
 int main(void)
 {
-       DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
-       DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
-       DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
-       BLANK();
+       DEFINE(__TASK_thread_info, offsetof(struct task_struct, stack));
+       DEFINE(__TASK_thread, offsetof(struct task_struct, thread));
        DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
        BLANK();
-       DEFINE(__THREAD_per_cause, offsetof(struct task_struct, thread.per_event.cause));
-       DEFINE(__THREAD_per_address, offsetof(struct task_struct, thread.per_event.address));
-       DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid));
+       DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp));
+       DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause));
+       DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address));
+       DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid));
+       DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb));
        BLANK();
        DEFINE(__TI_task, offsetof(struct thread_info, task));
        DEFINE(__TI_flags, offsetof(struct thread_info, flags));
@@ -176,7 +176,6 @@ int main(void)
        DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
        DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
        DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
-       DEFINE(__THREAD_trap_tdb, offsetof(struct task_struct, thread.trap_tdb));
        DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
        DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c));
        DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
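
These DEFINE() entries feed kbuild's offset generation: each constant is embedded in the compiler's assembly output and post-processed into asm-offsets.h. The underlying macro, as found in include/linux/kbuild.h:

        #define DEFINE(sym, val) \
                asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

Note the split after this change: __TASK_* values are offsets into task_struct, while __THREAD_* values are now offsets into thread_struct, so assembly users must first form a thread_struct pointer.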
index bff5e3b6d8223b8f9a9c84d8f99235d3d4e648bc..8ba32436effe4b9da0a271ae1ec0ad2a2bd689e7 100644 (file)
@@ -138,6 +138,8 @@ int init_cache_level(unsigned int cpu)
        union cache_topology ct;
        enum cache_type ctype;
 
+       if (!test_facility(34))
+               return -EOPNOTSUPP;
        if (!this_cpu_ci)
                return -EINVAL;
        ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
index 3238893c9d4ff66d492ce2a04421b85fa0cc0c34..84062e7a77dad75c50fa60822255ee2b20b34181 100644 (file)
@@ -178,17 +178,21 @@ _PIF_WORK = (_PIF_PER_TRAP)
  */
 ENTRY(__switch_to)
        stmg    %r6,%r15,__SF_GPRS(%r15)        # store gprs of prev task
-       stg     %r15,__THREAD_ksp(%r2)          # store kernel stack of prev
-       lg      %r4,__THREAD_info(%r2)          # get thread_info of prev
-       lg      %r5,__THREAD_info(%r3)          # get thread_info of next
+       lgr     %r1,%r2
+       aghi    %r1,__TASK_thread               # thread_struct of prev task
+       lg      %r4,__TASK_thread_info(%r2)     # get thread_info of prev
+       lg      %r5,__TASK_thread_info(%r3)     # get thread_info of next
+       stg     %r15,__THREAD_ksp(%r1)          # store kernel stack of prev
+       lgr     %r1,%r3
+       aghi    %r1,__TASK_thread               # thread_struct of next task
        lgr     %r15,%r5
        aghi    %r15,STACK_INIT                 # end of kernel stack of next
        stg     %r3,__LC_CURRENT                # store task struct of next
        stg     %r5,__LC_THREAD_INFO            # store thread info of next
        stg     %r15,__LC_KERNEL_STACK          # store end of kernel stack
+       lg      %r15,__THREAD_ksp(%r1)          # load kernel stack of next
        lctl    %c4,%c4,__TASK_pid(%r3)         # load pid to control reg. 4
        mvc     __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
-       lg      %r15,__THREAD_ksp(%r3)          # load kernel stack of next
        lmg     %r6,%r15,__SF_GPRS(%r15)        # load gprs of next task
        br      %r14
 
@@ -417,6 +421,7 @@ ENTRY(pgm_check_handler)
        LAST_BREAK %r14
        lg      %r15,__LC_KERNEL_STACK
        lg      %r14,__TI_task(%r12)
+       aghi    %r14,__TASK_thread      # pointer to thread_struct
        lghi    %r13,__LC_PGM_TDB
        tm      __LC_PGM_ILC+2,0x02     # check for transaction abort
        jz      2f
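
A C-level sketch of the address computation the reworked assembly performs; since __THREAD_ksp is now relative to thread_struct, the task pointer is first biased by __TASK_thread (the aghi instructions above):

        /* equivalent of: lgr %r1,%r2; aghi %r1,__TASK_thread; __THREAD_ksp(%r1) */
        static unsigned long *ksp_slot(void *task)
        {
                void *thread = (char *)task + __TASK_thread;  /* thread_struct base */

                return (unsigned long *)((char *)thread + __THREAD_ksp);
        }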
index 505c17c0ae1a67a93542c963ce800f7e8d9a11db..56b550893593a58a5ab07879a2fa3561cde009d4 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/nmi.h>
 #include <asm/crw.h>
 #include <asm/switch_to.h>
+#include <asm/ctl_reg.h>
 
 struct mcck_struct {
        int kill_task;
@@ -129,26 +130,30 @@ static int notrace s390_revalidate_registers(struct mci *mci)
        } else
                asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
 
-       asm volatile(
-               "       ld      0,0(%0)\n"
-               "       ld      1,8(%0)\n"
-               "       ld      2,16(%0)\n"
-               "       ld      3,24(%0)\n"
-               "       ld      4,32(%0)\n"
-               "       ld      5,40(%0)\n"
-               "       ld      6,48(%0)\n"
-               "       ld      7,56(%0)\n"
-               "       ld      8,64(%0)\n"
-               "       ld      9,72(%0)\n"
-               "       ld      10,80(%0)\n"
-               "       ld      11,88(%0)\n"
-               "       ld      12,96(%0)\n"
-               "       ld      13,104(%0)\n"
-               "       ld      14,112(%0)\n"
-               "       ld      15,120(%0)\n"
-               : : "a" (fpt_save_area));
-       /* Revalidate vector registers */
-       if (MACHINE_HAS_VX && current->thread.vxrs) {
+       if (!MACHINE_HAS_VX) {
+               /* Revalidate floating point registers */
+               asm volatile(
+                       "       ld      0,0(%0)\n"
+                       "       ld      1,8(%0)\n"
+                       "       ld      2,16(%0)\n"
+                       "       ld      3,24(%0)\n"
+                       "       ld      4,32(%0)\n"
+                       "       ld      5,40(%0)\n"
+                       "       ld      6,48(%0)\n"
+                       "       ld      7,56(%0)\n"
+                       "       ld      8,64(%0)\n"
+                       "       ld      9,72(%0)\n"
+                       "       ld      10,80(%0)\n"
+                       "       ld      11,88(%0)\n"
+                       "       ld      12,96(%0)\n"
+                       "       ld      13,104(%0)\n"
+                       "       ld      14,112(%0)\n"
+                       "       ld      15,120(%0)\n"
+                       : : "a" (fpt_save_area));
+       } else {
+               /* Revalidate vector registers */
+               union ctlreg0 cr0;
+
                if (!mci->vr) {
                        /*
                         * Vector registers can't be restored and therefore
@@ -156,8 +161,12 @@ static int notrace s390_revalidate_registers(struct mci *mci)
                         */
                        kill_task = 1;
                }
+               cr0.val = S390_lowcore.cregs_save_area[0];
+               cr0.afp = cr0.vx = 1;
+               __ctl_load(cr0.val, 0, 0);
                restore_vx_regs((__vector128 *)
-                               S390_lowcore.vector_save_area_addr);
+                               &S390_lowcore.vector_save_area);
+               __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
        }
        /* Revalidate access registers */
        asm volatile(
index dc5edc29b73aaf120bd462d93fd371c2803daf18..8f587d871b9f234bed189b9c568746930d1045f3 100644 (file)
@@ -163,7 +163,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 asmlinkage void execve_tail(void)
 {
        current->thread.fp_regs.fpc = 0;
-       asm volatile("sfpc %0,%0" : : "d" (0));
+       asm volatile("sfpc %0" : : "d" (0));
 }
 
 /*
index 43c3169ea49c7d019543d8e2754dd3061fd95789..ada0c07fe1a8744ecf989093720422cac1429e66 100644 (file)
@@ -270,6 +270,8 @@ ENTRY(_sclp_print_early)
        jno     .Lesa2
        ahi     %r15,-80
        stmh    %r6,%r15,96(%r15)               # store upper register halves
+       basr    %r13,0
+       lmh     %r0,%r15,.Lzeroes-.(%r13)       # clear upper register halves
 .Lesa2:
        lr      %r10,%r2                        # save string pointer
        lhi     %r2,0
@@ -291,6 +293,8 @@ ENTRY(_sclp_print_early)
 .Lesa3:
        lm      %r6,%r15,120(%r15)              # restore registers
        br      %r14
+.Lzeroes:
+       .fill   64,4,0
 
 .LwritedataS4:
        .long   0x00760005                      # SCLP command for write data
index f7f027caaaaacb33b279f76b33f3525374b6ae34..ca070d260af2e166b23b48553b2b6c02dc2a1046 100644 (file)
@@ -885,8 +885,6 @@ void __init setup_arch(char **cmdline_p)
         */
        setup_hwcaps();
 
-       HPAGE_SHIFT = MACHINE_HAS_HPAGE ? 20 : 0;
-
        /*
         * Create kernel page tables and switch to virtual addressing.
         */
index 4d96c9f5345538471cea4cd62620439c88a7f867..7bea81d8a3635025b0f372c3f908ade71b188e1b 100644 (file)
@@ -259,7 +259,7 @@ void vector_exception(struct pt_regs *regs)
        }
 
        /* get vector interrupt code from fpc */
-       asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
+       asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
        vic = (current->thread.fp_regs.fpc & 0xf00) >> 8;
        switch (vic) {
        case 1: /* invalid vector operation */
@@ -297,7 +297,7 @@ void data_exception(struct pt_regs *regs)
 
        location = get_trap_ip(regs);
 
-       asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
+       asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
        /* Check for vector register enablement */
        if (MACHINE_HAS_VX && !current->thread.vxrs &&
            (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
index 33082d0d101b59e41690435b7f35896cde14fe0f..b33f66110ca9401418f7d657b951d8bfb84d99c3 100644 (file)
@@ -31,8 +31,6 @@
 #define ALLOC_ORDER    2
 #define FRAG_MASK      0x03
 
-int HPAGE_SHIFT;
-
 unsigned long *crst_table_alloc(struct mm_struct *mm)
 {
        struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
index f6498eec9ee17baa66d63a3c71db729ed6f7a3da..f010c93a88b16c1d909069c14237ff389fa759df 100644 (file)
@@ -36,6 +36,8 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
  *           |   BPF stack   |     |
  *           |               |     |
  *           +---------------+     |
+ *           | 8 byte skbp   |     |
+ * R15+170 -> +---------------+     |
  *           | 8 byte hlen   |     |
  * R15+168 -> +---------------+     |
  *           | 4 byte align  |     |
@@ -51,11 +53,12 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
  * We get 160 bytes stack space from calling function, but only use
  * 12 * 8 byte for old backchain, r15..r6, and tail_call_cnt.
  */
-#define STK_SPACE      (MAX_BPF_STACK + 8 + 4 + 4 + 160)
+#define STK_SPACE      (MAX_BPF_STACK + 8 + 8 + 4 + 4 + 160)
 #define STK_160_UNUSED (160 - 12 * 8)
 #define STK_OFF                (STK_SPACE - STK_160_UNUSED)
 #define STK_OFF_TMP    160     /* Offset of tmp buffer on stack */
 #define STK_OFF_HLEN   168     /* Offset of SKB header length on stack */
+#define STK_OFF_SKBP   170     /* Offset of SKB pointer on stack */
 
 #define STK_OFF_R6     (160 - 11 * 8)  /* Offset of r6 on stack */
 #define STK_OFF_TCCNT  (160 - 12 * 8)  /* Offset of tail_call_cnt on stack */
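
Worked through with MAX_BPF_STACK = 512 (the generic eBPF stack size), the new 8-byte skb-pointer slot grows the frame from 688 to 696 bytes. A self-contained sketch of the arithmetic:

        #include <stdio.h>

        #define MAX_BPF_STACK   512
        #define STK_SPACE       (MAX_BPF_STACK + 8 + 8 + 4 + 4 + 160)   /* 696 */
        #define STK_160_UNUSED  (160 - 12 * 8)                          /* 64 */
        #define STK_OFF         (STK_SPACE - STK_160_UNUSED)            /* 632 */

        int main(void)
        {
                printf("STK_SPACE=%d, STK_OFF=%d\n", STK_SPACE, STK_OFF);
                printf("hlen slot at r15+%d, skb pointer at r15+%d\n", 168, 170);
                return 0;
        }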
index fee782acc2ee51f6a3aae4b28152ec76981c8350..9f4bbc09bf07b634092aede3058d89226ef60f97 100644 (file)
@@ -45,7 +45,7 @@ struct bpf_jit {
        int labels[1];          /* Labels for local jumps */
 };
 
-#define BPF_SIZE_MAX   4096    /* Max size for program */
+#define BPF_SIZE_MAX   0x7ffff /* Max size for program (20 bit signed displ) */
 
 #define SEEN_SKB       1       /* skb access */
 #define SEEN_MEM       2       /* use mem[] for temporary storage */
@@ -53,6 +53,7 @@ struct bpf_jit {
 #define SEEN_LITERAL   8       /* code uses literals */
 #define SEEN_FUNC      16      /* calls C functions */
 #define SEEN_TAIL_CALL 32      /* code uses tail calls */
+#define SEEN_SKB_CHANGE        64      /* code changes skb data */
 #define SEEN_STACK     (SEEN_FUNC | SEEN_MEM | SEEN_SKB)
 
 /*
@@ -203,19 +204,11 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
        _EMIT6(op1 | __disp, op2);                              \
 })
 
-#define EMIT6_DISP(op1, op2, b1, b2, b3, disp)                 \
-({                                                             \
-       _EMIT6_DISP(op1 | reg(b1, b2) << 16 |                   \
-                   reg_high(b3) << 8, op2, disp);              \
-       REG_SET_SEEN(b1);                                       \
-       REG_SET_SEEN(b2);                                       \
-       REG_SET_SEEN(b3);                                       \
-})
-
 #define _EMIT6_DISP_LH(op1, op2, disp)                         \
 ({                                                             \
-       unsigned int __disp_h = ((u32)disp) & 0xff000;          \
-       unsigned int __disp_l = ((u32)disp) & 0x00fff;          \
+       u32 _disp = (u32) disp;                                 \
+       unsigned int __disp_h = _disp & 0xff000;                \
+       unsigned int __disp_l = _disp & 0x00fff;                \
        _EMIT6(op1 | __disp_l, op2 | __disp_h >> 4);            \
 })
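
The rewritten _EMIT6_DISP_LH splits a displacement into the DL (low 12 bits) and DH (high 8 bits) fields of the s390 long-displacement instruction format; a signed 20-bit displacement is what lets BPF_SIZE_MAX above grow to 0x7ffff. A self-contained sketch of the split:

        #include <assert.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t disp = 0x7ffff;                /* new BPF_SIZE_MAX */
                uint32_t dl = disp & 0x00fff;           /* DL field, bits 0..11 */
                uint32_t dh = (disp & 0xff000) >> 12;   /* DH field, bits 12..19 */

                assert(dl == 0xfff && dh == 0x7f);
                assert(((dh << 12) | dl) == disp);      /* lossless for 20 bits */
                return 0;
        }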
 
@@ -389,13 +382,33 @@ static void save_restore_regs(struct bpf_jit *jit, int op)
        } while (re <= 15);
 }
 
+/*
+ * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
+ * we store the SKB header length on the stack and the SKB data
+ * pointer in REG_SKB_DATA.
+ */
+static void emit_load_skb_data_hlen(struct bpf_jit *jit)
+{
+       /* Header length: llgf %w1,<len>(%b1) */
+       EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_1,
+                     offsetof(struct sk_buff, len));
+       /* s %w1,<data_len>(%b1) */
+       EMIT4_DISP(0x5b000000, REG_W1, BPF_REG_1,
+                  offsetof(struct sk_buff, data_len));
+       /* stg %w1,STK_OFF_HLEN(%r0,%r15) */
+       EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, STK_OFF_HLEN);
+       /* lg %skb_data,data_off(%b1) */
+       EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
+                     BPF_REG_1, offsetof(struct sk_buff, data));
+}
+
 /*
  * Emit function prologue
  *
  * Save registers and create stack frame if necessary.
 * See stack frame layout description in "bpf_jit.h"!
  */
-static void bpf_jit_prologue(struct bpf_jit *jit)
+static void bpf_jit_prologue(struct bpf_jit *jit, bool is_classic)
 {
        if (jit->seen & SEEN_TAIL_CALL) {
                /* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
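
What emit_load_skb_data_hlen() computes, expressed as a C sketch (not code from this patch): the linear-header length and data pointer that bpf_jit.S consumes. When a helper changes skb data (the new SEEN_SKB_CHANGE case below), the saved skb pointer is reloaded from STK_OFF_SKBP and this pair is recomputed:

        static void load_skb_data_hlen(const struct sk_buff *skb,
                                       u32 *hlen, void **data)
        {
                *hlen = skb->len - skb->data_len;       /* bytes in the linear area */
                *data = skb->data;                      /* start of linear data */
        }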
@@ -429,32 +442,21 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
                        EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
                                      REG_15, 152);
        }
-       /*
-        * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
-        * we store the SKB header length on the stack and the SKB data
-        * pointer in REG_SKB_DATA.
-        */
-       if (jit->seen & SEEN_SKB) {
-               /* Header length: llgf %w1,<len>(%b1) */
-               EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_1,
-                             offsetof(struct sk_buff, len));
-               /* s %w1,<data_len>(%b1) */
-               EMIT4_DISP(0x5b000000, REG_W1, BPF_REG_1,
-                          offsetof(struct sk_buff, data_len));
-               /* stg %w1,ST_OFF_HLEN(%r0,%r15) */
+       if (jit->seen & SEEN_SKB)
+               emit_load_skb_data_hlen(jit);
+       if (jit->seen & SEEN_SKB_CHANGE)
+               /* stg %b1,STK_OFF_SKBP(%r0,%r15) */
                EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15,
-                             STK_OFF_HLEN);
-               /* lg %skb_data,data_off(%b1) */
-               EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
-                             BPF_REG_1, offsetof(struct sk_buff, data));
+                             STK_OFF_SKBP);
+       /* Clear A (%b0) and X (%b7) registers for converted BPF programs */
+       if (is_classic) {
+               if (REG_SEEN(BPF_REG_A))
+                       /* lghi %ba,0 */
+                       EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
+               if (REG_SEEN(BPF_REG_X))
+                       /* lghi %bx,0 */
+                       EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
        }
-       /* BPF compatibility: clear A (%b7) and X (%b8) registers */
-       if (REG_SEEN(BPF_REG_7))
-               /* lghi %b7,0 */
-               EMIT4_IMM(0xa7090000, BPF_REG_7, 0);
-       if (REG_SEEN(BPF_REG_8))
-               /* lghi %b8,0 */
-               EMIT4_IMM(0xa7090000, BPF_REG_8, 0);
 }
 
 /*
@@ -976,12 +978,19 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
                REG_SET_SEEN(BPF_REG_5);
                jit->seen |= SEEN_FUNC;
                /* lg %w1,<d(imm)>(%l) */
-               EMIT6_DISP(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
-                          EMIT_CONST_U64(func));
+               EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
+                             EMIT_CONST_U64(func));
                /* basr %r14,%w1 */
                EMIT2(0x0d00, REG_14, REG_W1);
                /* lgr %b0,%r2: load return value into %b0 */
                EMIT4(0xb9040000, BPF_REG_0, REG_2);
+               if (bpf_helper_changes_skb_data((void *)func)) {
+                       jit->seen |= SEEN_SKB_CHANGE;
+                       /* lg %b1,STK_OFF_SKBP(%r15) */
+                       EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0,
+                                     REG_15, STK_OFF_SKBP);
+                       emit_load_skb_data_hlen(jit);
+               }
                break;
        }
        case BPF_JMP | BPF_CALL | BPF_X:
@@ -1236,7 +1245,7 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
        jit->lit = jit->lit_start;
        jit->prg = 0;
 
-       bpf_jit_prologue(jit);
+       bpf_jit_prologue(jit, bpf_prog_was_classic(fp));
        for (i = 0; i < fp->len; i += insn_count) {
                insn_count = bpf_jit_insn(jit, fp, i);
                if (insn_count < 0)
index bc927a09a172b6d6699cf60610cde600d4c73e74..9cfa2ffaa9d6bb02dd29cb01d895a0feaf63bdb0 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/fs.h>
 #include <linux/module.h>
 #include <asm/processor.h>
+#include <asm/perf_event.h>
 
 #include "../../../drivers/oprofile/oprof.h"
 
index 138fb3db45ba66ee03b7471025b4881af80cc874..92ffe397b893c553c8504f10525a5e3d9d1e9e34 100644 (file)
@@ -7,6 +7,7 @@ generic-y += clkdev.h
 generic-y += cputime.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += preempt.h
 generic-y += sections.h
 generic-y += trace_clock.h
diff --git a/arch/score/include/asm/mm-arch-hooks.h b/arch/score/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index 5e38689..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_SCORE_MM_ARCH_HOOKS_H
-#define _ASM_SCORE_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_SCORE_MM_ARCH_HOOKS_H */
index 9ac4626e72844b3fe6e4f659b75ffc01fe375610..aac452b26aa896a00cd59c65977739ed078d9201 100644 (file)
@@ -16,6 +16,7 @@ generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += mman.h
 generic-y += msgbuf.h
 generic-y += param.h
diff --git a/arch/sh/include/asm/mm-arch-hooks.h b/arch/sh/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index 1808729..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_SH_MM_ARCH_HOOKS_H
-#define _ASM_SH_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_SH_MM_ARCH_HOOKS_H */
index 2b2a69dcc467cbdb7e9aa895fe1a98acfb86ce4d..e928618838bc53c83555cc7b275073fc8c745b3d 100644 (file)
@@ -12,6 +12,7 @@ generic-y += linkage.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += module.h
 generic-y += mutex.h
 generic-y += preempt.h
diff --git a/arch/sparc/include/asm/mm-arch-hooks.h b/arch/sparc/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index b89ba44..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_SPARC_MM_ARCH_HOOKS_H
-#define _ASM_SPARC_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_SPARC_MM_ARCH_HOOKS_H */
index 7931eeeb649af45af45aaa49a20fa727a6aecd40..f8b9f71b9a2b631816df61ff9b95657786e7cd51 100644 (file)
@@ -807,7 +807,7 @@ cond_branch:                        f_offset = addrs[i + filter[i].jf];
        }
 
        if (bpf_jit_enable > 1)
-               bpf_jit_dump(flen, proglen, pass, image);
+               bpf_jit_dump(flen, proglen, pass + 1, image);
 
        if (image) {
                bpf_flush_icache(image, image + proglen);
index d53654488c2c1529d0a3aede5836eeb11b3bcde5..d8a84316347127a48a7663ac833ab6d368d9118e 100644 (file)
@@ -19,6 +19,7 @@ generic-y += irq_regs.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += msgbuf.h
 generic-y += mutex.h
 generic-y += param.h
diff --git a/arch/tile/include/asm/mm-arch-hooks.h b/arch/tile/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index d1709ea..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_TILE_MM_ARCH_HOOKS_H
-#define _ASM_TILE_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_TILE_MM_ARCH_HOOKS_H */
index 99c9ff87e0187502179f8012afa5bd54309d2eb9..6b755d125783f45b05de02e3fe5f7b5e32d1af45 100644 (file)
@@ -1139,7 +1139,7 @@ static void __init load_hv_initrd(void)
 
 void __init free_initrd_mem(unsigned long begin, unsigned long end)
 {
-       free_bootmem(__pa(begin), end - begin);
+       free_bootmem_late(__pa(begin), end - begin);
 }
 
 static int __init setup_initrd(char *str)
index 3d63ff6f583ff406c98bce4014900669cec513bf..149ec55f9c46abd97cbb9b69c7a55afa23e23393 100644 (file)
@@ -16,6 +16,7 @@ generic-y += irq_regs.h
 generic-y += irq_work.h
 generic-y += kdebug.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += mutex.h
 generic-y += param.h
 generic-y += pci.h
diff --git a/arch/um/include/asm/mm-arch-hooks.h b/arch/um/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index a7c8b0d..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_UM_MM_ARCH_HOOKS_H
-#define _ASM_UM_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_UM_MM_ARCH_HOOKS_H */
index d12b377b5a8b6b8e8cea9c9df92eef250ed3bf49..1fc7a286dc6f342319ec06a81b53a087b9708ef9 100644 (file)
@@ -26,6 +26,7 @@ generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += local.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += mman.h
 generic-y += module.h
 generic-y += msgbuf.h
diff --git a/arch/unicore32/include/asm/mm-arch-hooks.h b/arch/unicore32/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index 4d79a85..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_UNICORE32_MM_ARCH_HOOKS_H
-#define _ASM_UNICORE32_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_UNICORE32_MM_ARCH_HOOKS_H */
index 3dbb7e7909ca50bc45d44e7a0fbc18d79e3cdb70..b3a1a5d77d92c2fd1d1d1a3b0b0b898944e8cd5b 100644 (file)
@@ -41,6 +41,7 @@ config X86
        select ARCH_USE_CMPXCHG_LOCKREF         if X86_64
        select ARCH_USE_QUEUED_RWLOCKS
        select ARCH_USE_QUEUED_SPINLOCKS
+       select ARCH_WANTS_DYNAMIC_TASK_STRUCT
        select ARCH_WANT_FRAME_POINTERS
        select ARCH_WANT_IPC_PARSE_VERSION      if X86_32
        select ARCH_WANT_OPTIONAL_GPIOLIB
index a15893d17c55988b542ad362f8c0074958130587..d8c0d3266173d96f84ac543885ba0bd63c0c3e47 100644 (file)
@@ -297,6 +297,18 @@ config OPTIMIZE_INLINING
 
          If unsure, say N.
 
+config DEBUG_ENTRY
+       bool "Debug low-level entry code"
+       depends on DEBUG_KERNEL
+       ---help---
+         This option enables sanity checks in x86's low-level entry code.
+         Some of these sanity checks may slow down kernel entries and
+         exits or otherwise impact performance.
+
+         This is currently used to help test NMI code.
+
+         If unsure, say N.
+
 config DEBUG_NMI_SELFTEST
        bool "NMI Selftest"
        depends on DEBUG_KERNEL && X86_LOCAL_APIC
index 3bb2c4302df1f8ba18e21b997fd5b51e8ec55c54..8cb3e438f21e8558aad8d5b409af181fabaaf5ad 100644 (file)
@@ -1237,11 +1237,12 @@ ENTRY(nmi)
         *  If the variable is not set and the stack is not the NMI
         *  stack then:
         *    o Set the special variable on the stack
-        *    o Copy the interrupt frame into a "saved" location on the stack
-        *    o Copy the interrupt frame into a "copy" location on the stack
+        *    o Copy the interrupt frame into an "outermost" location on the
+        *      stack
+        *    o Copy the interrupt frame into an "iret" location on the stack
         *    o Continue processing the NMI
         *  If the variable is set or the previous stack is the NMI stack:
-        *    o Modify the "copy" location to jump to the repeate_nmi
+        *    o Modify the "iret" location to jump to the repeat_nmi
         *    o return back to the first NMI
         *
         * Now on exit of the first NMI, we first clear the stack variable
@@ -1250,31 +1251,151 @@ ENTRY(nmi)
         * a nested NMI that updated the copy interrupt stack frame, a
         * jump will be made to the repeat_nmi code that will handle the second
         * NMI.
+        *
+        * However, espfix prevents us from directly returning to userspace
+        * with a single IRET instruction.  Similarly, IRET to user mode
+        * can fault.  We therefore handle NMIs from user space like
+        * other IST entries.
         */
 
        /* Use %rdx as our temp variable throughout */
        pushq   %rdx
 
+       testb   $3, CS-RIP+8(%rsp)
+       jz      .Lnmi_from_kernel
+
+       /*
+        * NMI from user mode.  We need to run on the thread stack, but we
+        * can't go through the normal entry paths: NMIs are masked, and
+        * we don't want to enable interrupts, because then we'll end
+        * up in an awkward situation in which IRQs are on but NMIs
+        * are off.
+        */
+
+       SWAPGS
+       cld
+       movq    %rsp, %rdx
+       movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+       pushq   5*8(%rdx)       /* pt_regs->ss */
+       pushq   4*8(%rdx)       /* pt_regs->rsp */
+       pushq   3*8(%rdx)       /* pt_regs->flags */
+       pushq   2*8(%rdx)       /* pt_regs->cs */
+       pushq   1*8(%rdx)       /* pt_regs->rip */
+       pushq   $-1             /* pt_regs->orig_ax */
+       pushq   %rdi            /* pt_regs->di */
+       pushq   %rsi            /* pt_regs->si */
+       pushq   (%rdx)          /* pt_regs->dx */
+       pushq   %rcx            /* pt_regs->cx */
+       pushq   %rax            /* pt_regs->ax */
+       pushq   %r8             /* pt_regs->r8 */
+       pushq   %r9             /* pt_regs->r9 */
+       pushq   %r10            /* pt_regs->r10 */
+       pushq   %r11            /* pt_regs->r11 */
+       pushq   %rbx            /* pt_regs->rbx */
+       pushq   %rbp            /* pt_regs->rbp */
+       pushq   %r12            /* pt_regs->r12 */
+       pushq   %r13            /* pt_regs->r13 */
+       pushq   %r14            /* pt_regs->r14 */
+       pushq   %r15            /* pt_regs->r15 */
+
+       /*
+        * At this point we no longer need to worry about stack damage
+        * due to nesting -- we're on the normal thread stack and we're
+        * done with the NMI stack.
+        */
+
+       movq    %rsp, %rdi
+       movq    $-1, %rsi
+       call    do_nmi
+
+       /*
+        * Return back to user mode.  We must *not* do the normal exit
+        * work, because we don't want to enable interrupts.  Fortunately,
+        * do_nmi doesn't modify pt_regs.
+        */
+       SWAPGS
+       jmp     restore_c_regs_and_iret
+
+.Lnmi_from_kernel:
+       /*
+        * Here's what our stack frame will look like:
+        * +---------------------------------------------------------+
+        * | original SS                                             |
+        * | original Return RSP                                     |
+        * | original RFLAGS                                         |
+        * | original CS                                             |
+        * | original RIP                                            |
+        * +---------------------------------------------------------+
+        * | temp storage for rdx                                    |
+        * +---------------------------------------------------------+
+        * | "NMI executing" variable                                |
+        * +---------------------------------------------------------+
+        * | iret SS          } Copied from "outermost" frame        |
+        * | iret Return RSP  } on each loop iteration; overwritten  |
+        * | iret RFLAGS      } by a nested NMI to force another     |
+        * | iret CS          } iteration if needed.                 |
+        * | iret RIP         }                                      |
+        * +---------------------------------------------------------+
+        * | outermost SS          } initialized in first_nmi;       |
+        * | outermost Return RSP  } will not be changed before      |
+        * | outermost RFLAGS      } NMI processing is done.         |
+        * | outermost CS          } Copied to "iret" frame on each  |
+        * | outermost RIP         } iteration.                      |
+        * +---------------------------------------------------------+
+        * | pt_regs                                                 |
+        * +---------------------------------------------------------+
+        *
+        * The "original" frame is used by hardware.  Before re-enabling
+        * NMIs, we need to be done with it, and we need to leave enough
+        * space for the asm code here.
+        *
+        * We return by executing IRET while RSP points to the "iret" frame.
+        * That will either return for real or it will loop back into NMI
+        * processing.
+        *
+        * The "outermost" frame is copied to the "iret" frame on each
+        * iteration of the loop, so each iteration starts with the "iret"
+        * frame pointing to the final return target.
+        */
+
        /*
-        * If %cs was not the kernel segment, then the NMI triggered in user
-        * space, which means it is definitely not nested.
+        * Determine whether we're a nested NMI.
+        *
+        * If we interrupted kernel code between repeat_nmi and
+        * end_repeat_nmi, then we are a nested NMI.  We must not
+        * modify the "iret" frame because it's being written by
+        * the outer NMI.  That's okay; the outer NMI handler is
+        * about to call do_nmi anyway, so we can just
+        * resume the outer NMI.
         */
-       cmpl    $__KERNEL_CS, 16(%rsp)
-       jne     first_nmi
+
+       movq    $repeat_nmi, %rdx
+       cmpq    8(%rsp), %rdx
+       ja      1f
+       movq    $end_repeat_nmi, %rdx
+       cmpq    8(%rsp), %rdx
+       ja      nested_nmi_out
+1:
 
        /*
-        * Check the special variable on the stack to see if NMIs are
-        * executing.
+        * Now check "NMI executing".  If it's set, then we're nested.
+        * This will not detect if we interrupted an outer NMI just
+        * before IRET.
         */
        cmpl    $1, -8(%rsp)
        je      nested_nmi
 
        /*
-        * Now test if the previous stack was an NMI stack.
-        * We need the double check. We check the NMI stack to satisfy the
-        * race when the first NMI clears the variable before returning.
-        * We check the variable because the first NMI could be in a
-        * breakpoint routine using a breakpoint stack.
+        * Now test if the previous stack was an NMI stack.  This covers
+        * the case where we interrupt an outer NMI after it clears
+        * "NMI executing" but before IRET.  We need to be careful, though:
+        * there is one case in which RSP could point to the NMI stack
+        * despite there being no NMI active: naughty userspace controls
+        * RSP at the very beginning of the SYSCALL targets.  We can
+        * pull a fast one on naughty userspace, though: we program
+        * SYSCALL to mask DF, so userspace cannot cause DF to be set
+        * if it controls the kernel's RSP.  We set DF before we clear
+        * "NMI executing".
         */
        lea     6*8(%rsp), %rdx
        /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
@@ -1286,25 +1407,20 @@ ENTRY(nmi)
        cmpq    %rdx, 4*8(%rsp)
        /* If it is below the NMI stack, it is a normal NMI */
        jb      first_nmi
-       /* Ah, it is within the NMI stack, treat it as nested */
+
+       /* Ah, it is within the NMI stack. */
+
+       testb   $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
+       jz      first_nmi       /* RSP was user controlled. */
+
+       /* This is a nested NMI. */
 
 nested_nmi:
        /*
-        * Do nothing if we interrupted the fixup in repeat_nmi.
-        * It's about to repeat the NMI handler, so we are fine
-        * with ignoring this one.
+        * Modify the "iret" frame to point to repeat_nmi, forcing another
+        * iteration of NMI handling.
         */
-       movq    $repeat_nmi, %rdx
-       cmpq    8(%rsp), %rdx
-       ja      1f
-       movq    $end_repeat_nmi, %rdx
-       cmpq    8(%rsp), %rdx
-       ja      nested_nmi_out
-
-1:
-       /* Set up the interrupted NMIs stack to jump to repeat_nmi */
-       leaq    -1*8(%rsp), %rdx
-       movq    %rdx, %rsp
+       subq    $8, %rsp
        leaq    -10*8(%rsp), %rdx
        pushq   $__KERNEL_DS
        pushq   %rdx
@@ -1318,61 +1434,42 @@ nested_nmi:
 nested_nmi_out:
        popq    %rdx
 
-       /* No need to check faults here */
+       /* We are returning to kernel mode, so this cannot result in a fault. */
        INTERRUPT_RETURN
 
 first_nmi:
-       /*
-        * Because nested NMIs will use the pushed location that we
-        * stored in rdx, we must keep that space available.
-        * Here's what our stack frame will look like:
-        * +-------------------------+
-        * | original SS             |
-        * | original Return RSP     |
-        * | original RFLAGS         |
-        * | original CS             |
-        * | original RIP            |
-        * +-------------------------+
-        * | temp storage for rdx    |
-        * +-------------------------+
-        * | NMI executing variable  |
-        * +-------------------------+
-        * | copied SS               |
-        * | copied Return RSP       |
-        * | copied RFLAGS           |
-        * | copied CS               |
-        * | copied RIP              |
-        * +-------------------------+
-        * | Saved SS                |
-        * | Saved Return RSP        |
-        * | Saved RFLAGS            |
-        * | Saved CS                |
-        * | Saved RIP               |
-        * +-------------------------+
-        * | pt_regs                 |
-        * +-------------------------+
-        *
-        * The saved stack frame is used to fix up the copied stack frame
-        * that a nested NMI may change to make the interrupted NMI iret jump
-        * to the repeat_nmi. The original stack frame and the temp storage
-        * is also used by nested NMIs and can not be trusted on exit.
-        */
-       /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
+       /* Restore rdx. */
        movq    (%rsp), %rdx
 
-       /* Set the NMI executing variable on the stack. */
-       pushq   $1
+       /* Make room for "NMI executing". */
+       pushq   $0
 
-       /* Leave room for the "copied" frame */
+       /* Leave room for the "iret" frame */
        subq    $(5*8), %rsp
 
-       /* Copy the stack frame to the Saved frame */
+       /* Copy the "original" frame to the "outermost" frame */
        .rept 5
        pushq   11*8(%rsp)
        .endr
 
        /* Everything up to here is safe from nested NMIs */
 
+#ifdef CONFIG_DEBUG_ENTRY
+       /*
+        * For ease of testing, unmask NMIs right away.  Disabled by
+        * default because IRET is very expensive.
+        */
+       pushq   $0              /* SS */
+       pushq   %rsp            /* RSP (minus 8 because of the previous push) */
+       addq    $8, (%rsp)      /* Fix up RSP */
+       pushfq                  /* RFLAGS */
+       pushq   $__KERNEL_CS    /* CS */
+       pushq   $1f             /* RIP */
+       INTERRUPT_RETURN        /* continues at repeat_nmi below */
+1:
+#endif
+
+repeat_nmi:
        /*
         * If there was a nested NMI, the first NMI's iret will return
         * here. But NMIs are still enabled and we can take another
@@ -1381,16 +1478,20 @@ first_nmi:
         * it will just return, as we are about to repeat an NMI anyway.
         * This makes it safe to copy to the stack frame that a nested
         * NMI will update.
+        *
+        * RSP is pointing to "outermost RIP".  gsbase is unknown, but, if
+        * we're repeating an NMI, gsbase has the same value that it had on
+        * the first iteration.  paranoid_entry will load the kernel
+        * gsbase if needed before we call do_nmi.  "NMI executing"
+        * is zero.
         */
-repeat_nmi:
+       movq    $1, 10*8(%rsp)          /* Set "NMI executing". */
+
        /*
-        * Update the stack variable to say we are still in NMI (the update
-        * is benign for the non-repeat case, where 1 was pushed just above
-        * to this very stack slot).
+        * Copy the "outermost" frame to the "iret" frame.  NMIs that nest
+        * here must not modify the "iret" frame while we're writing to
+        * it or it will end up containing garbage.
         */
-       movq    $1, 10*8(%rsp)
-
-       /* Make another copy, this one may be modified by nested NMIs */
        addq    $(10*8), %rsp
        .rept 5
        pushq   -6*8(%rsp)
@@ -1399,9 +1500,9 @@ repeat_nmi:
 end_repeat_nmi:
 
        /*
-        * Everything below this point can be preempted by a nested
-        * NMI if the first NMI took an exception and reset our iret stack
-        * so that we repeat another NMI.
+        * Everything below this point can be preempted by a nested NMI.
+        * If this happens, then the inner NMI will change the "iret"
+        * frame to point back to repeat_nmi.
         */
        pushq   $-1                             /* ORIG_RAX: no syscall to restart */
        ALLOC_PT_GPREGS_ON_STACK
@@ -1415,28 +1516,11 @@ end_repeat_nmi:
         */
        call    paranoid_entry
 
-       /*
-        * Save off the CR2 register. If we take a page fault in the NMI then
-        * it could corrupt the CR2 value. If the NMI preempts a page fault
-        * handler before it was able to read the CR2 register, and then the
-        * NMI itself takes a page fault, the page fault that was preempted
-        * will read the information from the NMI page fault and not the
-        * origin fault. Save it off and restore it if it changes.
-        * Use the r12 callee-saved register.
-        */
-       movq    %cr2, %r12
-
        /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
        movq    %rsp, %rdi
        movq    $-1, %rsi
        call    do_nmi
 
-       /* Did the NMI take a page fault? Restore cr2 if it did */
-       movq    %cr2, %rcx
-       cmpq    %rcx, %r12
-       je      1f
-       movq    %r12, %cr2
-1:
        testl   %ebx, %ebx                      /* swapgs needed? */
        jnz     nmi_restore
 nmi_swapgs:
@@ -1444,11 +1528,26 @@ nmi_swapgs:
 nmi_restore:
        RESTORE_EXTRA_REGS
        RESTORE_C_REGS
-       /* Pop the extra iret frame at once */
+
+       /* Point RSP at the "iret" frame. */
        REMOVE_PT_GPREGS_FROM_STACK 6*8
 
-       /* Clear the NMI executing stack variable */
-       movq    $0, 5*8(%rsp)
+       /*
+        * Clear "NMI executing".  Set DF first so that we can easily
+        * distinguish the remaining code between here and IRET from
+        * the SYSCALL entry and exit paths.  On a native kernel, we
+        * could just inspect RIP, but, on paravirt kernels,
+        * INTERRUPT_RETURN can translate into a jump into a
+        * hypercall page.
+        */
+       std
+       movq    $0, 5*8(%rsp)           /* clear "NMI executing" */
+
+       /*
+        * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
+        * stack in a single instruction.  We are returning to kernel
+        * mode, so this cannot result in a fault.
+        */
        INTERRUPT_RETURN
 END(nmi)
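
An illustrative C model of the kernel-mode NMI stack described by the new comments (lowest address first; hypothetical types, for orientation only — the real layout exists only in assembly):

        struct hw_frame {               /* pushed SS-first, so RIP is lowest */
                unsigned long rip, cs, rflags, rsp, ss;
        };

        struct nmi_stack {              /* lowest address first */
                struct pt_regs regs;            /* saved GPRs for do_nmi */
                struct hw_frame outermost;      /* final return target; stable */
                struct hw_frame iret;           /* consumed by IRET; a nested NMI
                                                   rewrites its RIP to repeat_nmi */
                unsigned long nmi_executing;    /* the "NMI executing" variable */
                unsigned long saved_rdx;        /* temp storage for rdx */
                struct hw_frame original;      /* pushed by hardware on entry */
        };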
 
index bb187a6a877cc62666afb43421eb7e5e8b6fbbfe..5a1844765a7aba6dab47b878daf6eb723c044c03 100644 (file)
@@ -205,7 +205,6 @@ sysexit_from_sys_call:
        movl    RDX(%rsp), %edx         /* arg3 */
        movl    RSI(%rsp), %ecx         /* arg4 */
        movl    RDI(%rsp), %r8d         /* arg5 */
-       movl    %ebp, %r9d              /* arg6 */
        .endm
 
        .macro auditsys_exit exit
@@ -236,6 +235,7 @@ sysexit_from_sys_call:
 
 sysenter_auditsys:
        auditsys_entry_common
+       movl    %ebp, %r9d              /* reload 6th syscall arg */
        jmp     sysenter_dispatch
 
 sysexit_audit:
@@ -336,7 +336,7 @@ ENTRY(entry_SYSCALL_compat)
         * 32-bit zero extended:
         */
        ASM_STAC
-1:     movl    (%r8), %ebp
+1:     movl    (%r8), %r9d
        _ASM_EXTABLE(1b, ia32_badarg)
        ASM_CLAC
        orl     $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
@@ -346,7 +346,7 @@ ENTRY(entry_SYSCALL_compat)
 cstar_do_call:
        /* 32-bit syscall -> 64-bit C ABI argument conversion */
        movl    %edi, %r8d              /* arg5 */
-       movl    %ebp, %r9d              /* arg6 */
+       /* r9 already loaded */         /* arg6 */
        xchg    %ecx, %esi              /* rsi:arg2, rcx:arg4 */
        movl    %ebx, %edi              /* arg1 */
        movl    %edx, %edx              /* arg3 (zero extension) */
@@ -358,7 +358,6 @@ cstar_dispatch:
        call    *ia32_sys_call_table(, %rax, 8)
        movq    %rax, RAX(%rsp)
 1:
-       movl    RCX(%rsp), %ebp
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        testl   $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
@@ -392,7 +391,9 @@ sysretl_from_sys_call:
 
 #ifdef CONFIG_AUDITSYSCALL
 cstar_auditsys:
+       movl    %r9d, R9(%rsp)          /* register to be clobbered by call */
        auditsys_entry_common
+       movl    R9(%rsp), %r9d          /* reload 6th syscall arg */
        jmp     cstar_dispatch
 
 sysretl_audit:
@@ -404,14 +405,16 @@ cstar_tracesys:
        testl   $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
        jz      cstar_auditsys
 #endif
+       xchgl   %r9d, %ebp
        SAVE_EXTRA_REGS
        xorl    %eax, %eax              /* Do not leak kernel information */
        movq    %rax, R11(%rsp)
        movq    %rax, R10(%rsp)
-       movq    %rax, R9(%rsp)
+       movq    %r9, R9(%rsp)
        movq    %rax, R8(%rsp)
        movq    %rsp, %rdi              /* &pt_regs -> arg1 */
        call    syscall_trace_enter
+       movl    R9(%rsp), %r9d
 
        /* Reload arg registers from stack. (see sysenter_tracesys) */
        movl    RCX(%rsp), %ecx
@@ -421,6 +424,7 @@ cstar_tracesys:
        movl    %eax, %eax              /* zero extension */
 
        RESTORE_EXTRA_REGS
+       xchgl   %ebp, %r9d
        jmp     cstar_do_call
 END(entry_SYSCALL_compat)
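
For reference, the 32-bit-to-64-bit argument mapping this path maintains: arg6 lives in %ebp in the 32-bit ABI (for SYSCALL it is fetched from the user stack via %r8), but must reach the C handler in %r9d. The patch therefore carries it in %r9d throughout and stashes it across clobbering calls. A hedged summary sketch:

        /* 32-bit syscall argument -> 64-bit C ABI register (cstar path) */
        static const char *arg_map[][2] = {
                { "ebx", "edi" },       /* arg1 */
                { "ecx", "esi" },       /* arg2 (xchg with esi) */
                { "edx", "edx" },       /* arg3 (zero-extended) */
                { "esi", "ecx" },       /* arg4 */
                { "edi", "r8d" },       /* arg5 */
                { "ebp", "r9d" },       /* arg6 (loaded from (%r8)) */
        };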
 
index 4dd1f2d770af41f6d9c16688af6c0c65b2d9554d..aeac434c9febd6e6cd4b5c595288e4b2eb2c9f12 100644 (file)
@@ -9,3 +9,4 @@ generic-y += cputime.h
 generic-y += dma-contiguous.h
 generic-y += early_ioremap.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
index 0637826292deae64c7e6f0cbe10f4a6af2056cd5..c49c5173158e743b985c4e29f09f040811db6a48 100644 (file)
@@ -189,6 +189,7 @@ union fpregs_state {
        struct fxregs_state             fxsave;
        struct swregs_state             soft;
        struct xregs_state              xsave;
+       u8 __padding[PAGE_SIZE];
 };
 
 /*
@@ -197,40 +198,6 @@ union fpregs_state {
  * state fields:
  */
 struct fpu {
-       /*
-        * @state:
-        *
-        * In-memory copy of all FPU registers that we save/restore
-        * over context switches. If the task is using the FPU then
-        * the registers in the FPU are more recent than this state
-        * copy. If the task context-switches away then they get
-        * saved here and represent the FPU state.
-        *
-        * After context switches there may be a (short) time period
-        * during which the in-FPU hardware registers are unchanged
-        * and still perfectly match this state, if the tasks
-        * scheduled afterwards are not using the FPU.
-        *
-        * This is the 'lazy restore' window of optimization, which
-        * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
-        *
-        * We detect whether a subsequent task uses the FPU via setting
-        * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
-        *
-        * During this window, if the task gets scheduled again, we
-        * might be able to skip having to do a restore from this
-        * memory buffer to the hardware registers - at the cost of
-        * incurring the overhead of #NM fault traps.
-        *
-        * Note that on modern CPUs that support the XSAVEOPT (or other
-        * optimized XSAVE instructions), we don't use #NM traps anymore,
-        * as the hardware can track whether FPU registers need saving
-        * or not. On such CPUs we activate the non-lazy ('eagerfpu')
-        * logic, which unconditionally saves/restores all FPU state
-        * across context switches. (if FPU state exists.)
-        */
-       union fpregs_state              state;
-
        /*
         * @last_cpu:
         *
@@ -288,6 +255,43 @@ struct fpu {
         * deal with bursty apps that only use the FPU for a short time:
         */
        unsigned char                   counter;
+       /*
+        * @state:
+        *
+        * In-memory copy of all FPU registers that we save/restore
+        * over context switches. If the task is using the FPU then
+        * the registers in the FPU are more recent than this state
+        * copy. If the task context-switches away then they get
+        * saved here and represent the FPU state.
+        *
+        * After context switches there may be a (short) time period
+        * during which the in-FPU hardware registers are unchanged
+        * and still perfectly match this state, if the tasks
+        * scheduled afterwards are not using the FPU.
+        *
+        * This is the 'lazy restore' window of optimization, which
+        * we track through 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
+        *
+        * We detect whether a subsequent task uses the FPU via setting
+        * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
+        *
+        * During this window, if the task gets scheduled again, we
+        * might be able to skip having to do a restore from this
+        * memory buffer to the hardware registers - at the cost of
+        * incurring the overhead of #NM fault traps.
+        *
+        * Note that on modern CPUs that support the XSAVEOPT (or other
+        * optimized XSAVE instructions), we don't use #NM traps anymore,
+        * as the hardware can track whether FPU registers need saving
+        * or not. On such CPUs we activate the non-lazy ('eagerfpu')
+        * logic, which unconditionally saves/restores all FPU state
+        * across context switches. (if FPU state exists.)
+        */
+       union fpregs_state              state;
+       /*
+        * WARNING: 'state' is dynamically-sized.  Do not put
+        * anything after it here.
+        */
 };
 
 #endif /* _ASM_X86_FPU_H */
index 200ec2e7821d1922cb1f15ddaaf836e064dc025e..cd0310e186f4af534b7e21312b81d092ff9ba8cd 100644 (file)
 
 #if IS_ENABLED(CONFIG_INTEL_PMC_IPC)
 
-/*
- * intel_pmc_ipc_simple_command
- * @cmd: command
- * @sub: sub type
- */
 int intel_pmc_ipc_simple_command(int cmd, int sub);
-
-/*
- * intel_pmc_ipc_raw_cmd
- * @cmd: command
- * @sub: sub type
- * @in: input data
- * @inlen: input length in bytes
- * @out: output data
- * @outlen: output length in dwords
- * @sptr: data writing to SPTR register
- * @dptr: data writing to DPTR register
- */
 int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen,
                u32 *out, u32 outlen, u32 dptr, u32 sptr);
-
-/*
- * intel_pmc_ipc_command
- * @cmd: command
- * @sub: sub type
- * @in: input data
- * @inlen: input length in bytes
- * @out: output data
- * @outlen: output length in dwords
- */
 int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
                u32 *out, u32 outlen);
 
index 2a7f5d782c332d1965ac1c5a23a33289cfce7352..49ec9038ec14102a286c9b4bed126a6825613439 100644 (file)
@@ -604,6 +604,8 @@ struct kvm_arch {
        bool iommu_noncoherent;
 #define __KVM_HAVE_ARCH_NONCOHERENT_DMA
        atomic_t noncoherent_dma_count;
+#define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
+       atomic_t assigned_device_count;
        struct kvm_pic *vpic;
        struct kvm_ioapic *vioapic;
        struct kvm_pit *vpit;
diff --git a/arch/x86/include/asm/mm-arch-hooks.h b/arch/x86/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index 4e881a3..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_X86_MM_ARCH_HOOKS_H
-#define _ASM_X86_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_X86_MM_ARCH_HOOKS_H */
index 5e8daee7c5c94be6fc48bc7f32e593fb46948557..804a3a6030ca046500e7a092cfa0a854be77796f 100644 (file)
@@ -23,7 +23,7 @@ extern struct static_key rdpmc_always_available;
 
 static inline void load_mm_cr4(struct mm_struct *mm)
 {
-       if (static_key_true(&rdpmc_always_available) ||
+       if (static_key_false(&rdpmc_always_available) ||
            atomic_read(&mm->context.perf_rdpmc_allowed))
                cr4_set_bits(X86_CR4_PCE);
        else
index 43e6519df0d507429a9533b51c7a28f2c0f3b90b..944f1785ed0dafac2a9a888e3aa75120d495eb70 100644 (file)
@@ -390,9 +390,6 @@ struct thread_struct {
 #endif
        unsigned long           gs;
 
-       /* Floating point and extended processor state */
-       struct fpu              fpu;
-
        /* Save middle states of ptrace breakpoints */
        struct perf_event       *ptrace_bps[HBP_NUM];
        /* Debug status used for traps, single steps, etc... */
@@ -418,6 +415,13 @@ struct thread_struct {
        unsigned long           iopl;
        /* Max allowed port in the bitmap, in bytes: */
        unsigned                io_bitmap_max;
+
+       /* Floating point and extended processor state */
+       struct fpu              fpu;
+       /*
+        * WARNING: 'fpu' is dynamically-sized.  It *MUST* be at
+        * the end.
+        */
 };
 
 /*
index 8fba544e9cc4164261f76c6c5d894b80a92f0377..f36d56bd76324543f6f0c208bea569ea96e6e802 100644 (file)
 #define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE          (1 << 4)
 /* Support for a virtual guest idle state is available */
 #define HV_X64_GUEST_IDLE_STATE_AVAILABLE              (1 << 5)
+/* Guest crash data handler available */
+#define HV_X64_GUEST_CRASH_MSR_AVAILABLE               (1 << 10)
 
 /*
  * Implementation recommendations. Indicates which behaviors the hypervisor
index a4ae82eb82aa8485146aee83376ba26fb9306afe..cd54147cb365fb3622121c782eee8e7fb3ddcc77 100644 (file)
@@ -354,7 +354,7 @@ struct kvm_xcrs {
 struct kvm_sync_regs {
 };
 
-#define KVM_QUIRK_LINT0_REENABLED      (1 << 0)
-#define KVM_QUIRK_CD_NW_CLEARED                (1 << 1)
+#define KVM_X86_QUIRK_LINT0_REENABLED  (1 << 0)
+#define KVM_X86_QUIRK_CD_NW_CLEARED    (1 << 1)
 
 #endif /* _ASM_X86_KVM_H */
index 188076161c1be51afe135d6289b8ce0e96952bf3..63eb68b73589bcbbc21f9c526193adca0de2e52d 100644 (file)
@@ -951,6 +951,14 @@ static u64 intel_cqm_event_count(struct perf_event *event)
        if (!cqm_group_leader(event))
                return 0;
 
+       /*
+        * Getting up-to-date values requires an SMP IPI which is not
+        * possible if we're being called in interrupt context. Return
+        * the cached values instead.
+        */
+       if (unlikely(in_interrupt()))
+               goto out;
+
        /*
         * Notice that we don't perform the reading of an RMID
         * atomically, because we can't hold a spin lock across the
index 32826791e6757203b5440dad36f5e80b3fc8fbe9..1e173f6285c73b76b2e6ab41daed7681406c5d15 100644 (file)
@@ -4,6 +4,8 @@
 #include <asm/fpu/internal.h>
 #include <asm/tlbflush.h>
 
+#include <linux/sched.h>
+
 /*
  * Initialize the TS bit in CR0 according to the style of context-switches
  * we are using:
@@ -136,6 +138,43 @@ static void __init fpu__init_system_generic(void)
 unsigned int xstate_size;
 EXPORT_SYMBOL_GPL(xstate_size);
 
+/* Enforce that 'MEMBER' is the last field of 'TYPE': */
+#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
+       BUILD_BUG_ON(sizeof(TYPE) != offsetofend(TYPE, MEMBER))
+
+/*
+ * We append the 'struct fpu' to the task_struct:
+ */
+static void __init fpu__init_task_struct_size(void)
+{
+       int task_size = sizeof(struct task_struct);
+
+       /*
+        * Subtract off the static size of the register state.
+        * It potentially has a bunch of padding.
+        */
+       task_size -= sizeof(((struct task_struct *)0)->thread.fpu.state);
+
+       /*
+        * Add back the dynamically-calculated register state
+        * size.
+        */
+       task_size += xstate_size;
+
+       /*
+        * We dynamically size 'struct fpu', so we require that
+        * it be at the end of 'thread_struct' and that
+        * 'thread_struct' be at the end of 'task_struct'.  If
+        * you hit a compile error here, check the structure to
+        * see if something got added to the end.
+        */
+       CHECK_MEMBER_AT_END_OF(struct fpu, state);
+       CHECK_MEMBER_AT_END_OF(struct thread_struct, fpu);
+       CHECK_MEMBER_AT_END_OF(struct task_struct, thread);
+
+       arch_task_struct_size = task_size;
+}
+
 /*
  * Set up the xstate_size based on the legacy FPU context size.
  *
@@ -287,6 +326,7 @@ void __init fpu__init_system(struct cpuinfo_x86 *c)
        fpu__init_system_generic();
        fpu__init_system_xstate_size_legacy();
        fpu__init_system_xstate();
+       fpu__init_task_struct_size();
 
        fpu__init_system_ctx_switch();
 }
@@ -311,9 +351,15 @@ static int __init x86_noxsave_setup(char *s)
 
        setup_clear_cpu_cap(X86_FEATURE_XSAVE);
        setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+       setup_clear_cpu_cap(X86_FEATURE_XSAVEC);
        setup_clear_cpu_cap(X86_FEATURE_XSAVES);
        setup_clear_cpu_cap(X86_FEATURE_AVX);
        setup_clear_cpu_cap(X86_FEATURE_AVX2);
+       setup_clear_cpu_cap(X86_FEATURE_AVX512F);
+       setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
+       setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
+       setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
+       setup_clear_cpu_cap(X86_FEATURE_MPX);
 
        return 1;
 }
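
[Editor's note] fpu__init_task_struct_size() above sizes task_struct at boot and relies on three compile-time checks that each dynamically-sized member really sits last in its container. A hedged userspace re-creation of that check, assuming the kernel's offsetofend() semantics and substituting C11 _Static_assert for BUILD_BUG_ON:

/*
 * Hedged re-creation of CHECK_MEMBER_AT_END_OF(): the check passes
 * only when the named member ends exactly where the structure does.
 */
#include <stddef.h>

#define offsetofend(TYPE, MEMBER) \
        (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
        _Static_assert(sizeof(TYPE) == offsetofend(TYPE, MEMBER), \
                       #MEMBER " must be the last member of " #TYPE)

struct demo {
        int  a;
        char tail[16];          /* stand-in for the runtime-sized state */
};

CHECK_MEMBER_AT_END_OF(struct demo, tail);      /* compiles: tail is last */

int main(void)
{
        return 0;
}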
index c3e985d1751ced9dbab5ac0aa7c38f9623b449f4..d05bd2e2ee91e642b0162a00021043e79d3b4b3a 100644 (file)
@@ -408,15 +408,15 @@ static void default_do_nmi(struct pt_regs *regs)
 NOKPROBE_SYMBOL(default_do_nmi);
 
 /*
- * NMIs can hit breakpoints which will cause it to lose its
- * NMI context with the CPU when the breakpoint does an iret.
- */
-#ifdef CONFIG_X86_32
-/*
- * For i386, NMIs use the same stack as the kernel, and we can
- * add a workaround to the iret problem in C (preventing nested
- * NMIs if an NMI takes a trap). Simply have 3 states the NMI
- * can be in:
+ * NMIs can page fault or hit breakpoints, which will cause them to lose
+ * their NMI context with the CPU when the breakpoint or page fault does an IRET.
+ *
+ * As a result, NMIs can nest if NMIs get unmasked due to an IRET during
+ * NMI processing.  On x86_64, the asm glue protects us from nested NMIs
+ * if the outer NMI came from kernel mode, but we can still nest if the
+ * outer NMI came from user mode.
+ *
+ * To handle these nested NMIs, we have three states:
  *
  *  1) not running
  *  2) executing
@@ -430,15 +430,14 @@ NOKPROBE_SYMBOL(default_do_nmi);
  * (Note, the latch is binary, thus multiple NMIs triggering,
  *  when one is running, are ignored. Only one NMI is restarted.)
  *
- * If an NMI hits a breakpoint that executes an iret, another
- * NMI can preempt it. We do not want to allow this new NMI
- * to run, but we want to execute it when the first one finishes.
- * We set the state to "latched", and the exit of the first NMI will
- * perform a dec_return, if the result is zero (NOT_RUNNING), then
- * it will simply exit the NMI handler. If not, the dec_return
- * would have set the state to NMI_EXECUTING (what we want it to
- * be when we are running). In this case, we simply jump back
- * to rerun the NMI handler again, and restart the 'latched' NMI.
+ * If an NMI executes an iret, another NMI can preempt it. We do not
+ * want to allow this new NMI to run, but we want to execute it when the
+ * first one finishes.  We set the state to "latched", and the exit of
+ * the first NMI will perform a dec_return, if the result is zero
+ * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
+ * dec_return would have set the state to NMI_EXECUTING (what we want it
+ * to be when we are running). In this case, we simply jump back to
+ * rerun the NMI handler again, and restart the 'latched' NMI.
  *
  * No trap (breakpoint or page fault) should be hit before nmi_restart,
  * thus there is no race between the first check of state for NOT_RUNNING
@@ -461,49 +460,36 @@ enum nmi_states {
 static DEFINE_PER_CPU(enum nmi_states, nmi_state);
 static DEFINE_PER_CPU(unsigned long, nmi_cr2);
 
-#define nmi_nesting_preprocess(regs)                                   \
-       do {                                                            \
-               if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {      \
-                       this_cpu_write(nmi_state, NMI_LATCHED);         \
-                       return;                                         \
-               }                                                       \
-               this_cpu_write(nmi_state, NMI_EXECUTING);               \
-               this_cpu_write(nmi_cr2, read_cr2());                    \
-       } while (0);                                                    \
-       nmi_restart:
-
-#define nmi_nesting_postprocess()                                      \
-       do {                                                            \
-               if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))     \
-                       write_cr2(this_cpu_read(nmi_cr2));              \
-               if (this_cpu_dec_return(nmi_state))                     \
-                       goto nmi_restart;                               \
-       } while (0)
-#else /* x86_64 */
+#ifdef CONFIG_X86_64
 /*
- * In x86_64 things are a bit more difficult. This has the same problem
- * where an NMI hitting a breakpoint that calls iret will remove the
- * NMI context, allowing a nested NMI to enter. What makes this more
- * difficult is that both NMIs and breakpoints have their own stack.
- * When a new NMI or breakpoint is executed, the stack is set to a fixed
- * point. If an NMI is nested, it will have its stack set at that same
- * fixed address that the first NMI had, and will start corrupting the
- * stack. This is handled in entry_64.S, but the same problem exists with
- * the breakpoint stack.
+ * In x86_64, we need to handle breakpoint -> NMI -> breakpoint.  Without
+ * some care, the inner breakpoint will clobber the outer breakpoint's
+ * stack.
  *
- * If a breakpoint is being processed, and the debug stack is being used,
- * if an NMI comes in and also hits a breakpoint, the stack pointer
- * will be set to the same fixed address as the breakpoint that was
- * interrupted, causing that stack to be corrupted. To handle this case,
- * check if the stack that was interrupted is the debug stack, and if
- * so, change the IDT so that new breakpoints will use the current stack
- * and not switch to the fixed address. On return of the NMI, switch back
- * to the original IDT.
+ * If a breakpoint is being processed, and the debug stack is being
+ * used, if an NMI comes in and also hits a breakpoint, the stack
+ * pointer will be set to the same fixed address as the breakpoint that
+ * was interrupted, causing that stack to be corrupted. To handle this
+ * case, check if the stack that was interrupted is the debug stack, and
+ * if so, change the IDT so that new breakpoints will use the current
+ * stack and not switch to the fixed address. On return of the NMI,
+ * switch back to the original IDT.
  */
 static DEFINE_PER_CPU(int, update_debug_stack);
+#endif
 
-static inline void nmi_nesting_preprocess(struct pt_regs *regs)
+dotraplinkage notrace void
+do_nmi(struct pt_regs *regs, long error_code)
 {
+       if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
+               this_cpu_write(nmi_state, NMI_LATCHED);
+               return;
+       }
+       this_cpu_write(nmi_state, NMI_EXECUTING);
+       this_cpu_write(nmi_cr2, read_cr2());
+nmi_restart:
+
+#ifdef CONFIG_X86_64
        /*
         * If we interrupted a breakpoint, it is possible that
         * the nmi handler will have breakpoints too. We need to
@@ -514,22 +500,8 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs)
                debug_stack_set_zero();
                this_cpu_write(update_debug_stack, 1);
        }
-}
-
-static inline void nmi_nesting_postprocess(void)
-{
-       if (unlikely(this_cpu_read(update_debug_stack))) {
-               debug_stack_reset();
-               this_cpu_write(update_debug_stack, 0);
-       }
-}
 #endif
 
-dotraplinkage notrace void
-do_nmi(struct pt_regs *regs, long error_code)
-{
-       nmi_nesting_preprocess(regs);
-
        nmi_enter();
 
        inc_irq_stat(__nmi_count);
@@ -539,8 +511,17 @@ do_nmi(struct pt_regs *regs, long error_code)
 
        nmi_exit();
 
-       /* On i386, may loop back to preprocess */
-       nmi_nesting_postprocess();
+#ifdef CONFIG_X86_64
+       if (unlikely(this_cpu_read(update_debug_stack))) {
+               debug_stack_reset();
+               this_cpu_write(update_debug_stack, 0);
+       }
+#endif
+
+       if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
+               write_cr2(this_cpu_read(nmi_cr2));
+       if (this_cpu_dec_return(nmi_state))
+               goto nmi_restart;
 }
 NOKPROBE_SYMBOL(do_nmi);
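
[Editor's note] The rewritten do_nmi() folds the old 32-bit-only nesting macros into one explicit state machine: a nested NMI is merely recorded as NMI_LATCHED, and the decrement at exit either reaches NOT_RUNNING or loops back to nmi_restart. A minimal single-threaded sketch of that latch pattern, with C11 atomics standing in for the per-CPU variable (illustrative only):

#include <stdatomic.h>
#include <stdio.h>

enum nmi_states { NMI_NOT_RUNNING = 0, NMI_EXECUTING, NMI_LATCHED };

static _Atomic int nmi_state;

static void handle_one_nmi(void)
{
        puts("handling one NMI");
}

static void do_nmi(void)
{
        if (atomic_load(&nmi_state) != NMI_NOT_RUNNING) {
                atomic_store(&nmi_state, NMI_LATCHED); /* remember it */
                return;
        }
        atomic_store(&nmi_state, NMI_EXECUTING);
restart:
        handle_one_nmi();
        /*
         * LATCHED (2) decrements to EXECUTING (1): rerun the handler;
         * EXECUTING (1) decrements to NOT_RUNNING (0): we are done.
         */
        if (atomic_fetch_sub(&nmi_state, 1) - 1)
                goto restart;
}

int main(void)
{
        do_nmi();
        return 0;
}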
 
index 9cad694ed7c4d6a755b34af705e0a055cb0c04aa..397688beed4be5ce7d9445d7847d44613d2d84b5 100644 (file)
@@ -81,7 +81,7 @@ EXPORT_SYMBOL_GPL(idle_notifier_unregister);
  */
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
-       *dst = *src;
+       memcpy(dst, src, arch_task_struct_size);
 
        return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
 }
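
[Editor's note] arch_dup_task_struct() switches from structure assignment to memcpy() because task_struct is now dynamically sized: plain assignment copies only sizeof(struct task_struct) and would silently drop the FPU state appended past the end. A hedged sketch of the pitfall, with a flexible array member standing in for the appended state:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct task {
        int  pid;
        char state[];           /* stand-in for the appended FPU state */
};

int main(void)
{
        size_t arch_task_struct_size = sizeof(struct task) + 32;
        struct task *src = calloc(1, arch_task_struct_size);
        struct task *dst = calloc(1, arch_task_struct_size);

        if (!src || !dst)
                return 1;
        src->pid = 42;
        memset(src->state, 0xaa, 32);

        /* *dst = *src would copy just sizeof(struct task) bytes */
        memcpy(dst, src, arch_task_struct_size);

        printf("pid %d and %zu tail bytes copied\n",
               dst->pid, arch_task_struct_size - sizeof(struct task));
        free(src);
        free(dst);
        return 0;
}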
index d3010aa79dafaff14e74b140e12daa2756f03160..b1f3ed9c7a9efcb1df4ef0da09a731600cbeb679 100644 (file)
@@ -992,8 +992,17 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 
        common_cpu_up(cpu, tidle);
 
+       /*
+        * We have to walk the irq descriptors to setup the vector
+        * space for the cpu which comes online.  Prevent irq
+        * alloc/free across the bringup.
+        */
+       irq_lock_sparse();
+
        err = do_boot_cpu(apicid, cpu, tidle);
+
        if (err) {
+               irq_unlock_sparse();
                pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
                return -EIO;
        }
@@ -1011,6 +1020,8 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
                touch_nmi_watchdog();
        }
 
+       irq_unlock_sparse();
+
        return 0;
 }
 
index 64dd467930997adbfa6aecd25641ef2004c5488c..2fbea2544f2437bc0ae50ef00288dc320effd81e 100644 (file)
@@ -98,6 +98,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
                best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
 
        vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu);
+       if (vcpu->arch.eager_fpu)
+               kvm_x86_ops->fpu_activate(vcpu);
 
        /*
         * The existing code assumes virtual address is 48-bit in the canonical
index 7dbced309ddb526d99ae41114bf3afd8488ccaa8..5c520ebf6343270272679e213b19cd9380b63293 100644 (file)
@@ -200,6 +200,7 @@ int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev)
                        goto out_unmap;
        }
 
+       kvm_arch_start_assignment(kvm);
        pci_set_dev_assigned(pdev);
 
        dev_info(&pdev->dev, "kvm assign device\n");
@@ -224,6 +225,7 @@ int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev)
        iommu_detach_device(domain, &pdev->dev);
 
        pci_clear_dev_assigned(pdev);
+       kvm_arch_end_assignment(kvm);
 
        dev_info(&pdev->dev, "kvm deassign device\n");
 
index 954e98a8c2e38bf9861d4d6349eb721264e8cb18..2a5ca97c263bb48092ea80f88c7d30120ea63b6e 100644 (file)
@@ -1595,7 +1595,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
        for (i = 0; i < APIC_LVT_NUM; i++)
                apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
        apic_update_lvtt(apic);
-       if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_LINT0_REENABLED))
+       if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
                apic_set_reg(apic, APIC_LVT0,
                             SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
        apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
index f807496b62c2cc76e82a60cd58ee187f0cdc77c2..44171462bd2a31561645aff21c83584b82eed6ff 100644 (file)
@@ -2479,6 +2479,14 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
        return 0;
 }
 
+static bool kvm_is_mmio_pfn(pfn_t pfn)
+{
+       if (pfn_valid(pfn))
+               return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
+
+       return true;
+}
+
 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                    unsigned pte_access, int level,
                    gfn_t gfn, pfn_t pfn, bool speculative,
@@ -2506,7 +2514,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                spte |= PT_PAGE_SIZE_MASK;
        if (tdp_enabled)
                spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
-                       kvm_is_reserved_pfn(pfn));
+                       kvm_is_mmio_pfn(pfn));
 
        if (host_writable)
                spte |= SPTE_HOST_WRITEABLE;
index de1d2d8062e24048232af909d684f0c2ed9e21bc..dc0a84a6f3094ac997701de74c868763e6843229 100644 (file)
@@ -120,6 +120,16 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
        return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
 }
 
+static u8 mtrr_disabled_type(void)
+{
+       /*
+        * Intel SDM 11.11.2.2: all MTRRs are disabled when
+        * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
+        * memory type is applied to all of physical memory.
+        */
+       return MTRR_TYPE_UNCACHABLE;
+}
+
 /*
 * Three terms are used in the following code:
 * - segment, it indicates the address segments covered by fixed MTRRs.
@@ -434,6 +444,8 @@ struct mtrr_iter {
 
        /* output fields. */
        int mem_type;
+       /* mtrr is completely disabled? */
+       bool mtrr_disabled;
        /* [start, end) is not fully covered in MTRRs? */
        bool partial_map;
 
@@ -549,7 +561,7 @@ static void mtrr_lookup_var_next(struct mtrr_iter *iter)
 static void mtrr_lookup_start(struct mtrr_iter *iter)
 {
        if (!mtrr_is_enabled(iter->mtrr_state)) {
-               iter->partial_map = true;
+               iter->mtrr_disabled = true;
                return;
        }
 
@@ -563,6 +575,7 @@ static void mtrr_lookup_init(struct mtrr_iter *iter,
        iter->mtrr_state = mtrr_state;
        iter->start = start;
        iter->end = end;
+       iter->mtrr_disabled = false;
        iter->partial_map = false;
        iter->fixed = false;
        iter->range = NULL;
@@ -656,15 +669,19 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
                return MTRR_TYPE_WRBACK;
        }
 
-       /* It is not covered by MTRRs. */
-       if (iter.partial_map) {
-               /*
-                * We just check one page, partially covered by MTRRs is
-                * impossible.
-                */
-               WARN_ON(type != -1);
-               type = mtrr_default_type(mtrr_state);
-       }
+       if (iter.mtrr_disabled)
+               return mtrr_disabled_type();
+
+       /*
+        * We just check one page, so it cannot be partially
+        * covered by MTRRs.
+        */
+       WARN_ON(iter.partial_map);
+
+       /* not contained in any MTRRs. */
+       if (type == -1)
+               return mtrr_default_type(mtrr_state);
+
        return type;
 }
 EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
@@ -689,6 +706,9 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
                        return false;
        }
 
+       if (iter.mtrr_disabled)
+               return true;
+
        if (!iter.partial_map)
                return true;
 
index 602b974a60a626e18d11965ed5ad1461c329bee8..8e0c0844c6b9681e31e64bdeba0f1822108bec3e 100644 (file)
@@ -865,6 +865,64 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
 }
 
+#define MTRR_TYPE_UC_MINUS     7
+#define MTRR2PROTVAL_INVALID 0xff
+
+static u8 mtrr2protval[8];
+
+static u8 fallback_mtrr_type(int mtrr)
+{
+       /*
+        * WT and WP aren't always available in the host PAT.  Treat
+        * them as UC and UC- respectively.  Everything else should be
+        * there.
+        */
+       switch (mtrr)
+       {
+       case MTRR_TYPE_WRTHROUGH:
+               return MTRR_TYPE_UNCACHABLE;
+       case MTRR_TYPE_WRPROT:
+               return MTRR_TYPE_UC_MINUS;
+       default:
+               BUG();
+       }
+}
+
+static void build_mtrr2protval(void)
+{
+       int i;
+       u64 pat;
+
+       for (i = 0; i < 8; i++)
+               mtrr2protval[i] = MTRR2PROTVAL_INVALID;
+
+       /* Ignore the invalid MTRR types.  */
+       mtrr2protval[2] = 0;
+       mtrr2protval[3] = 0;
+
+       /*
+        * Use host PAT value to figure out the mapping from guest MTRR
+        * values to nested page table PAT/PCD/PWT values.  We do not
+        * want to change the host PAT value every time we enter the
+        * guest.
+        */
+       rdmsrl(MSR_IA32_CR_PAT, pat);
+       for (i = 0; i < 8; i++) {
+               u8 mtrr = pat >> (8 * i);
+
+               if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID)
+                       mtrr2protval[mtrr] = __cm_idx2pte(i);
+       }
+
+       for (i = 0; i < 8; i++) {
+               if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) {
+                       u8 fallback = fallback_mtrr_type(i);
+                       mtrr2protval[i] = mtrr2protval[fallback];
+                       BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID);
+               }
+       }
+}
+
 static __init int svm_hardware_setup(void)
 {
        int cpu;
@@ -931,6 +989,7 @@ static __init int svm_hardware_setup(void)
        } else
                kvm_disable_tdp();
 
+       build_mtrr2protval();
        return 0;
 
 err:
@@ -1085,6 +1144,39 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
        return target_tsc - tsc;
 }
 
+static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat)
+{
+       struct kvm_vcpu *vcpu = &svm->vcpu;
+
+       /* Unlike Intel, AMD takes the guest's CR0.CD into account.
+        *
+        * AMD does not have IPAT.  To emulate it for the case of guests
+        * with no assigned devices, just set everything to WB.  If guests
+        * have assigned devices, however, we cannot force WB for RAM
+        * pages only, so use the guest PAT directly.
+        */
+       if (!kvm_arch_has_assigned_device(vcpu->kvm))
+               *g_pat = 0x0606060606060606;
+       else
+               *g_pat = vcpu->arch.pat;
+}
+
+static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
+{
+       u8 mtrr;
+
+       /*
+        * 1. MMIO: trust guest MTRR, so same as item 3.
+        * 2. No passthrough: always map as WB, and force guest PAT to WB as well
+        * 3. Passthrough: can't guarantee the result, try to trust guest.
+        */
+       if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm))
+               return 0;
+
+       mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
+       return mtrr2protval[mtrr];
+}
+
 static void init_vmcb(struct vcpu_svm *svm, bool init_event)
 {
        struct vmcb_control_area *control = &svm->vmcb->control;
@@ -1180,6 +1272,7 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
                clr_cr_intercept(svm, INTERCEPT_CR3_READ);
                clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
                save->g_pat = svm->vcpu.arch.pat;
+               svm_set_guest_pat(svm, &save->g_pat);
                save->cr3 = 0;
                save->cr4 = 0;
        }
@@ -1579,7 +1672,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
         * does not do it - this results in some delay at
         * reboot
         */
-       if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_CD_NW_CLEARED))
+       if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
                cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
        svm->vmcb->save.cr0 = cr0;
        mark_dirty(svm->vmcb, VMCB_CR);
@@ -3254,6 +3347,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
        case MSR_VM_IGNNE:
                vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
                break;
+       case MSR_IA32_CR_PAT:
+               if (npt_enabled) {
+                       if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
+                               return 1;
+                       vcpu->arch.pat = data;
+                       svm_set_guest_pat(svm, &svm->vmcb->save.g_pat);
+                       mark_dirty(svm->vmcb, VMCB_NPT);
+                       break;
+               }
+               /* fall through */
        default:
                return kvm_set_msr_common(vcpu, msr);
        }
@@ -4088,11 +4191,6 @@ static bool svm_has_high_real_mode_segbase(void)
        return true;
 }
 
-static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
-{
-       return 0;
-}
-
 static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 {
 }
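
[Editor's note] build_mtrr2protval() above derives the guest-MTRR-to-PTE mapping by decoding the host's IA32_PAT MSR: each of the eight PAT bytes names a memory type, and the first slot holding a given type wins. A hedged userspace sketch of that decode loop; since rdmsr is unavailable outside the kernel, the documented power-on PAT layout is assumed, and the types left unmapped here are exactly the ones the real code routes through fallback_mtrr_type():

#include <stdint.h>
#include <stdio.h>

#define MTRR2PROTVAL_INVALID 0xff

int main(void)
{
        uint64_t pat = 0x0007040600070406ULL;   /* assumed default PAT */
        uint8_t mtrr2slot[8];

        for (int i = 0; i < 8; i++)
                mtrr2slot[i] = MTRR2PROTVAL_INVALID;

        for (int i = 0; i < 8; i++) {
                uint8_t type = (pat >> (8 * i)) & 0xff;

                /* first PAT slot holding a given memory type wins */
                if (type < 8 && mtrr2slot[type] == MTRR2PROTVAL_INVALID)
                        mtrr2slot[type] = i;
        }

        for (int i = 0; i < 8; i++)
                printf("memory type %d -> PAT slot %#x\n", i, mtrr2slot[i]);
        return 0;
}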
index e856dd566f4c2a6c10a4a6dc9c6ea018fe72cdda..83b7b5cd75d52dd67976274da3c11807f1c35490 100644 (file)
@@ -8632,22 +8632,17 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
        u64 ipat = 0;
 
        /* For VT-d and EPT combination
-        * 1. MMIO: always map as UC
+        * 1. MMIO: guest may want to apply WC, trust it.
         * 2. EPT with VT-d:
         *   a. VT-d without snooping control feature: can't guarantee the
-        *      result, try to trust guest.
+        *      result, try to trust guest.  So the same as item 1.
         *   b. VT-d with snooping control feature: snooping control feature of
         *      VT-d engine can guarantee the cache correctness. Just set it
         *      to WB to keep consistent with host. So the same as item 3.
         * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
         *    consistent with host MTRR
         */
-       if (is_mmio) {
-               cache = MTRR_TYPE_UNCACHABLE;
-               goto exit;
-       }
-
-       if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
+       if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
                ipat = VMX_EPT_IPAT_BIT;
                cache = MTRR_TYPE_WRBACK;
                goto exit;
@@ -8655,7 +8650,10 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 
        if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
                ipat = VMX_EPT_IPAT_BIT;
-               cache = MTRR_TYPE_UNCACHABLE;
+               if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
+                       cache = MTRR_TYPE_WRBACK;
+               else
+                       cache = MTRR_TYPE_UNCACHABLE;
                goto exit;
        }
 
index bbaf44e8f0d3cdd7100c40c98ccf2ab40ae1ea2f..5ef2560075bfb80e6fdabcdf51f71258091e4339 100644 (file)
@@ -3157,8 +3157,7 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
                        cpuid_count(XSTATE_CPUID, index,
                                    &size, &offset, &ecx, &edx);
                        memcpy(dest, src + offset, size);
-               } else
-                       WARN_ON_ONCE(1);
+               }
 
                valid -= feature;
        }
@@ -7315,11 +7314,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 
        vcpu = kvm_x86_ops->vcpu_create(kvm, id);
 
-       /*
-        * Activate fpu unconditionally in case the guest needs eager FPU.  It will be
-        * deactivated soon if it doesn't.
-        */
-       kvm_x86_ops->fpu_activate(vcpu);
        return vcpu;
 }
 
@@ -8218,6 +8212,24 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
                        kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
+void kvm_arch_start_assignment(struct kvm *kvm)
+{
+       atomic_inc(&kvm->arch.assigned_device_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
+
+void kvm_arch_end_assignment(struct kvm *kvm)
+{
+       atomic_dec(&kvm->arch.assigned_device_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
+
+bool kvm_arch_has_assigned_device(struct kvm *kvm)
+{
+       return atomic_read(&kvm->arch.assigned_device_count);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
+
 void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
 {
        atomic_inc(&kvm->arch.noncoherent_dma_count);
index edc8cdcd786b00627a1029c8bb3b2aedcb291d09..0ca2f3e4803c2a5846be297a51eb5c1cf67aff4a 100644 (file)
@@ -147,6 +147,11 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
        return kvm_register_write(vcpu, reg, val);
 }
 
+static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
+{
+       return !(kvm->arch.disabled_quirks & quirk);
+}
+
 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
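
[Editor's note] kvm_check_has_quirk() hides a double negative worth spelling out: disabled_quirks is a mask of legacy behaviours that userspace opted out of, so a quirk is active exactly when its bit is clear. A small illustrative sketch, with the constants copied from the uapi hunk earlier in this diff:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define KVM_X86_QUIRK_LINT0_REENABLED  (1 << 0)
#define KVM_X86_QUIRK_CD_NW_CLEARED    (1 << 1)

static bool check_has_quirk(uint64_t disabled_quirks, uint64_t quirk)
{
        return !(disabled_quirks & quirk);
}

int main(void)
{
        uint64_t disabled = KVM_X86_QUIRK_CD_NW_CLEARED;

        printf("LINT0 quirk active: %d\n",
               check_has_quirk(disabled, KVM_X86_QUIRK_LINT0_REENABLED));
        printf("CD/NW quirk active: %d\n",
               check_has_quirk(disabled, KVM_X86_QUIRK_CD_NW_CLEARED));
        return 0;
}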
index cc5ccc415cc01ef8ea9e58b3f81a281c9ab412bf..b9c78f3bcd6739718def393f49d1f1cbb0e8bb5c 100644 (file)
@@ -63,8 +63,6 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
                    !PageReserved(pfn_to_page(start_pfn + i)))
                        return 1;
 
-       WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
-
        return 0;
 }
 
@@ -94,7 +92,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;
-       int ram_region;
 
        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
@@ -117,23 +114,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
-       /* First check if whole region can be identified as RAM or not */
-       ram_region = region_is_ram(phys_addr, size);
-       if (ram_region > 0) {
-               WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
-                               (unsigned long int)phys_addr,
-                               (unsigned long int)last_addr);
+       pfn      = phys_addr >> PAGE_SHIFT;
+       last_pfn = last_addr >> PAGE_SHIFT;
+       if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+                                         __ioremap_check_ram) == 1) {
+               WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+                         &phys_addr, &last_addr);
                return NULL;
        }
 
-       /* If could not be identified(-1), check page by page */
-       if (ram_region < 0) {
-               pfn      = phys_addr >> PAGE_SHIFT;
-               last_pfn = last_addr >> PAGE_SHIFT;
-               if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
-                                         __ioremap_check_ram) == 1)
-                       return NULL;
-       }
        /*
         * Mappings have to be page-aligned
         */
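
[Editor's note] The replacement code above checks the candidate range page by page with walk_system_ram_range(), using an inclusive last_addr so unaligned ranges still count every touched page. A sketch of just that pfn arithmetic; the addresses are made up:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned long phys_addr = 0x1fff800, size = 0x1000;
        unsigned long last_addr = phys_addr + size - 1;
        unsigned long pfn = phys_addr >> PAGE_SHIFT;
        unsigned long last_pfn = last_addr >> PAGE_SHIFT;

        /* a one-page request straddling a boundary covers two pages */
        printf("checking %lu page(s) from pfn %#lx\n",
               last_pfn - pfn + 1, pfn);
        return 0;
}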
index 9d518d693b4b7adf07463c695f3cd14412a19192..844b06d67df4da95cec611375d55c05d52884efd 100644 (file)
@@ -126,3 +126,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
 }
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+       if (vma->vm_flags & VM_MPX)
+               return "[mpx]";
+       return NULL;
+}
index 7a657f58bbea152057262a61e325c169f78bc516..db1b0bc5017c9f01b456a97b5b84d6d40a2c03b2 100644 (file)
 #define CREATE_TRACE_POINTS
 #include <asm/trace/mpx.h>
 
-static const char *mpx_mapping_name(struct vm_area_struct *vma)
-{
-       return "[mpx]";
-}
-
-static struct vm_operations_struct mpx_vma_ops = {
-       .name = mpx_mapping_name,
-};
-
-static int is_mpx_vma(struct vm_area_struct *vma)
-{
-       return (vma->vm_ops == &mpx_vma_ops);
-}
-
 static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)
 {
        if (is_64bit_mm(mm))
@@ -53,9 +39,6 @@ static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
 /*
  * This is really a simplified "vm_mmap". it only handles MPX
  * bounds tables (the bounds directory is user-allocated).
- *
- * Later on, we use the vma->vm_ops to uniquely identify these
- * VMAs.
  */
 static unsigned long mpx_mmap(unsigned long len)
 {
@@ -101,7 +84,6 @@ static unsigned long mpx_mmap(unsigned long len)
                ret = -ENOMEM;
                goto out;
        }
-       vma->vm_ops = &mpx_vma_ops;
 
        if (vm_flags & VM_LOCKED) {
                up_write(&mm->mmap_sem);
@@ -812,7 +794,7 @@ static noinline int zap_bt_entries_mapping(struct mm_struct *mm,
                 * so stop immediately and return an error.  This
                 * probably results in a SIGSEGV.
                 */
-               if (!is_mpx_vma(vma))
+               if (!(vma->vm_flags & VM_MPX))
                        return -EINVAL;
 
                len = min(vma->vm_end, end) - addr;
@@ -945,9 +927,9 @@ static int try_unmap_single_bt(struct mm_struct *mm,
         * lots of tables even though we have no actual table
         * entries in use.
         */
-       while (next && is_mpx_vma(next))
+       while (next && (next->vm_flags & VM_MPX))
                next = next->vm_next;
-       while (prev && is_mpx_vma(prev))
+       while (prev && (prev->vm_flags & VM_MPX))
                prev = prev->vm_prev;
        /*
         * We know 'start' and 'end' lie within an area controlled
index 3250f2371aea5c9f2c8f8f19d4f6535627e0e188..90b924acd9822ffdd9409b9fd97325ea7954b97a 100644 (file)
@@ -117,7 +117,7 @@ static void flush_tlb_func(void *info)
                } else {
                        unsigned long addr;
                        unsigned long nr_pages =
-                               f->flush_end - f->flush_start / PAGE_SIZE;
+                               (f->flush_end - f->flush_start) / PAGE_SIZE;
                        addr = f->flush_start;
                        while (addr < f->flush_end) {
                                __flush_tlb_single(addr);
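
[Editor's note] The one-character tlb.c fix above is a classic precedence bug: division binds tighter than subtraction, so the unparenthesized expression computed flush_end - (flush_start / PAGE_SIZE) and wildly over-counted the pages to flush. A self-contained demonstration:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        unsigned long flush_start = 0x100000, flush_end = 0x104000;

        unsigned long buggy = flush_end - flush_start / PAGE_SIZE;
        unsigned long fixed = (flush_end - flush_start) / PAGE_SIZE;

        printf("buggy: %lu pages, fixed: %lu pages\n", buggy, fixed);
        /* prints: buggy: 1064704 pages, fixed: 4 pages */
        return 0;
}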
index 579a8fd74be07804d983a298641b755526f1cb44..ec5214f39aa802ed923d10315a1672547493262d 100644 (file)
@@ -269,7 +269,7 @@ static void emit_bpf_tail_call(u8 **pprog)
        EMIT4(0x48, 0x8B, 0x46,                   /* mov rax, qword ptr [rsi + 16] */
              offsetof(struct bpf_array, map.max_entries));
        EMIT3(0x48, 0x39, 0xD0);                  /* cmp rax, rdx */
-#define OFFSET1 44 /* number of bytes to jump */
+#define OFFSET1 47 /* number of bytes to jump */
        EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
        label1 = cnt;
 
@@ -278,15 +278,15 @@ static void emit_bpf_tail_call(u8 **pprog)
         */
        EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
        EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
-#define OFFSET2 33
+#define OFFSET2 36
        EMIT2(X86_JA, OFFSET2);                   /* ja out */
        label2 = cnt;
        EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
        EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
 
        /* prog = array->prog[index]; */
-       EMIT4(0x48, 0x8D, 0x44, 0xD6);            /* lea rax, [rsi + rdx * 8 + 0x50] */
-       EMIT1(offsetof(struct bpf_array, prog));
+       EMIT4_off32(0x48, 0x8D, 0x84, 0xD6,       /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
+                   offsetof(struct bpf_array, prog));
        EMIT3(0x48, 0x8B, 0x00);                  /* mov rax, qword ptr [rax] */
 
        /* if (prog == NULL)
@@ -315,6 +315,26 @@ static void emit_bpf_tail_call(u8 **pprog)
        *pprog = prog;
 }
 
+
+static void emit_load_skb_data_hlen(u8 **pprog)
+{
+       u8 *prog = *pprog;
+       int cnt = 0;
+
+       /* r9d = skb->len - skb->data_len (headlen)
+        * r10 = skb->data
+        */
+       /* mov %r9d, off32(%rdi) */
+       EMIT3_off32(0x44, 0x8b, 0x8f, offsetof(struct sk_buff, len));
+
+       /* sub %r9d, off32(%rdi) */
+       EMIT3_off32(0x44, 0x2b, 0x8f, offsetof(struct sk_buff, data_len));
+
+       /* mov %r10, off32(%rdi) */
+       EMIT3_off32(0x4c, 0x8b, 0x97, offsetof(struct sk_buff, data));
+       *pprog = prog;
+}
+
 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                  int oldproglen, struct jit_context *ctx)
 {
@@ -329,36 +349,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 
        emit_prologue(&prog);
 
-       if (seen_ld_abs) {
-               /* r9d : skb->len - skb->data_len (headlen)
-                * r10 : skb->data
-                */
-               if (is_imm8(offsetof(struct sk_buff, len)))
-                       /* mov %r9d, off8(%rdi) */
-                       EMIT4(0x44, 0x8b, 0x4f,
-                             offsetof(struct sk_buff, len));
-               else
-                       /* mov %r9d, off32(%rdi) */
-                       EMIT3_off32(0x44, 0x8b, 0x8f,
-                                   offsetof(struct sk_buff, len));
-
-               if (is_imm8(offsetof(struct sk_buff, data_len)))
-                       /* sub %r9d, off8(%rdi) */
-                       EMIT4(0x44, 0x2b, 0x4f,
-                             offsetof(struct sk_buff, data_len));
-               else
-                       EMIT3_off32(0x44, 0x2b, 0x8f,
-                                   offsetof(struct sk_buff, data_len));
-
-               if (is_imm8(offsetof(struct sk_buff, data)))
-                       /* mov %r10, off8(%rdi) */
-                       EMIT4(0x4c, 0x8b, 0x57,
-                             offsetof(struct sk_buff, data));
-               else
-                       /* mov %r10, off32(%rdi) */
-                       EMIT3_off32(0x4c, 0x8b, 0x97,
-                                   offsetof(struct sk_buff, data));
-       }
+       if (seen_ld_abs)
+               emit_load_skb_data_hlen(&prog);
 
        for (i = 0; i < insn_cnt; i++, insn++) {
                const s32 imm32 = insn->imm;
@@ -367,6 +359,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                u8 b1 = 0, b2 = 0, b3 = 0;
                s64 jmp_offset;
                u8 jmp_cond;
+               bool reload_skb_data;
                int ilen;
                u8 *func;
 
@@ -818,12 +811,18 @@ xadd:                     if (is_imm8(insn->off))
                        func = (u8 *) __bpf_call_base + imm32;
                        jmp_offset = func - (image + addrs[i]);
                        if (seen_ld_abs) {
-                               EMIT2(0x41, 0x52); /* push %r10 */
-                               EMIT2(0x41, 0x51); /* push %r9 */
-                               /* need to adjust jmp offset, since
-                                * pop %r9, pop %r10 take 4 bytes after call insn
-                                */
-                               jmp_offset += 4;
+                               reload_skb_data = bpf_helper_changes_skb_data(func);
+                               if (reload_skb_data) {
+                                       EMIT1(0x57); /* push %rdi */
+                                       jmp_offset += 22; /* pop, mov, sub, mov */
+                               } else {
+                                       EMIT2(0x41, 0x52); /* push %r10 */
+                                       EMIT2(0x41, 0x51); /* push %r9 */
+                                       /* need to adjust jmp offset, since
+                                        * pop %r9, pop %r10 take 4 bytes after call insn
+                                        */
+                                       jmp_offset += 4;
+                               }
                        }
                        if (!imm32 || !is_simm32(jmp_offset)) {
                                pr_err("unsupported bpf func %d addr %p image %p\n",
@@ -832,8 +831,13 @@ xadd:                      if (is_imm8(insn->off))
                        }
                        EMIT1_off32(0xE8, jmp_offset);
                        if (seen_ld_abs) {
-                               EMIT2(0x41, 0x59); /* pop %r9 */
-                               EMIT2(0x41, 0x5A); /* pop %r10 */
+                               if (reload_skb_data) {
+                                       EMIT1(0x5F); /* pop %rdi */
+                                       emit_load_skb_data_hlen(&prog);
+                               } else {
+                                       EMIT2(0x41, 0x59); /* pop %r9 */
+                                       EMIT2(0x41, 0x5A); /* pop %r10 */
+                               }
                        }
                        break;
 
@@ -1099,7 +1103,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
        }
 
        if (bpf_jit_enable > 1)
-               bpf_jit_dump(prog->len, proglen, 0, image);
+               bpf_jit_dump(prog->len, proglen, pass + 1, image);
 
        if (image) {
                bpf_flush_icache(header, image + proglen);
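
[Editor's note] The JIT changes above cache skb->data in %r10 and the linear headlen in %r9d, then re-run emit_load_skb_data_hlen() after any helper that bpf_helper_changes_skb_data() flags as moving the packet. A hedged C sketch of that cache-then-reload invariant; fake_skb and the helper below are stand-ins, not kernel types:

#include <stdio.h>

struct fake_skb {
        char *data;
        unsigned int len;       /* total length */
        unsigned int data_len;  /* bytes living in paged fragments */
};

static char storage[128];

/* stand-in for a helper flagged by bpf_helper_changes_skb_data() */
static void pull_header(struct fake_skb *skb)
{
        skb->data += 14;        /* pretend an Ethernet header was pulled */
        skb->len -= 14;
}

int main(void)
{
        struct fake_skb skb = { storage, 128, 64 };

        /* cached copies, as the JIT keeps in %r10 and %r9d */
        char *data = skb.data;
        unsigned int headlen = skb.len - skb.data_len;

        pull_header(&skb);

        /* reload, mirroring emit_load_skb_data_hlen() after the call */
        data = skb.data;
        headlen = skb.len - skb.data_len;

        printf("headlen %u at %p after reload\n", headlen, (void *)data);
        return 0;
}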
index 14d15bf1a95bfb408bc6ac56f41ad51545228e6b..5b478accd5fcae8237ca010330a1c99d299fa6b4 100644 (file)
@@ -19,6 +19,7 @@ generic-y += linkage.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
 generic-y += percpu.h
 generic-y += preempt.h
 generic-y += resource.h
diff --git a/arch/xtensa/include/asm/mm-arch-hooks.h b/arch/xtensa/include/asm/mm-arch-hooks.h
deleted file mode 100644 (file)
index d2e5cfd..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _ASM_XTENSA_MM_ARCH_HOOKS_H
-#define _ASM_XTENSA_MM_ARCH_HOOKS_H
-
-#endif /* _ASM_XTENSA_MM_ARCH_HOOKS_H */
index 0436c21db7f23b9c5fddf5fce48bc4b21541cd9f..719b7152aed14060cc957dd4acd349f6ab099d64 100644 (file)
@@ -51,7 +51,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
        unsigned long idx = BIO_POOL_NONE;
        unsigned inline_vecs;
 
-       if (!bs) {
+       if (!bs || !bs->bio_integrity_pool) {
                bip = kmalloc(sizeof(struct bio_integrity_payload) +
                              sizeof(struct bio_vec) * nr_vecs, gfp_mask);
                inline_vecs = nr_vecs;
@@ -104,7 +104,7 @@ void bio_integrity_free(struct bio *bio)
                kfree(page_address(bip->bip_vec->bv_page) +
                      bip->bip_vec->bv_offset);
 
-       if (bs) {
+       if (bs && bs->bio_integrity_pool) {
                if (bip->bip_slab != BIO_POOL_NONE)
                        bvec_free(bs->bvec_integrity_pool, bip->bip_vec,
                                  bip->bip_slab);
index 2a00d349cd6883cba32d9fd477251889a1c58081..d6e5ba3399f0ae151ea040e2ec1fd1df1c3dba6a 100644 (file)
@@ -1831,8 +1831,9 @@ EXPORT_SYMBOL(bio_endio);
  * Allocates and returns a new bio which represents @sectors from the start of
  * @bio, and updates @bio to represent the remaining sectors.
  *
- * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
- * responsibility to ensure that @bio is not freed before the split.
+ * Unless this is a discard request the newly allocated bio will point
+ * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
+ * @bio is not freed before the split.
  */
 struct bio *bio_split(struct bio *bio, int sectors,
                      gfp_t gfp, struct bio_set *bs)
@@ -1842,7 +1843,15 @@ struct bio *bio_split(struct bio *bio, int sectors,
        BUG_ON(sectors <= 0);
        BUG_ON(sectors >= bio_sectors(bio));
 
-       split = bio_clone_fast(bio, gfp, bs);
+       /*
+        * Discards need a mutable bio_vec to accommodate the payload
+        * required by the DSM TRIM and UNMAP commands.
+        */
+       if (bio->bi_rw & REQ_DISCARD)
+               split = bio_clone_bioset(bio, gfp, bs);
+       else
+               split = bio_clone_fast(bio, gfp, bs);
+
        if (!split)
                return NULL;
 
@@ -2009,6 +2018,7 @@ int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
        bio->bi_css = blkcg_css;
        return 0;
 }
+EXPORT_SYMBOL_GPL(bio_associate_blkcg);
 
 /**
  * bio_associate_current - associate a bio with %current
@@ -2039,6 +2049,7 @@ int bio_associate_current(struct bio *bio)
        bio->bi_css = task_get_css(current, blkio_cgrp_id);
        return 0;
 }
+EXPORT_SYMBOL_GPL(bio_associate_current);
 
 /**
  * bio_disassociate_task - undo bio_associate_current()
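
[Editor's note] bio_split() above picks its clone depth by request type: discards get bio_clone_bioset() for a private bio_vec they can rewrite for DSM TRIM/UNMAP, while everything else shares the parent's vector via the cheaper bio_clone_fast(). A hedged, self-contained sketch of that shallow-versus-deep policy; every name here is illustrative, none of it is block-layer API:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct vec { char bytes[16]; };

struct req {
        bool is_discard;
        struct vec *vec;        /* payload descriptor */
};

static struct req *clone_req(const struct req *src)
{
        struct req *clone = malloc(sizeof(*clone));

        if (!clone)
                return NULL;
        *clone = *src;                          /* shallow by default */
        if (src->is_discard) {
                /* deep copy: the split may rewrite the payload */
                clone->vec = malloc(sizeof(*clone->vec));
                if (!clone->vec) {
                        free(clone);
                        return NULL;
                }
                memcpy(clone->vec, src->vec, sizeof(*src->vec));
        }
        return clone;
}

int main(void)
{
        struct vec v = { "TRIM range" };
        struct req r = { .is_discard = true, .vec = &v };
        struct req *c = clone_req(&r);

        if (!c)
                return 1;
        printf("clone %s its vector\n", c->vec == r.vec ? "shares" : "owns");
        if (c->vec != r.vec)
                free(c->vec);
        free(c);
        return 0;
}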
index 9f97da52d006281b1ab3e2911d85934216e3931a..d6283b3f5db50674d18ae485970f1e24ed44569d 100644 (file)
 
 #define MAX_KEY_LEN 100
 
+/*
+ * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
+ * blkcg_pol_register_mutex nests outside of it and synchronizes entire
+ * policy [un]register operations including cgroup file additions /
+ * removals.  Putting cgroup file registration outside blkcg_pol_mutex
+ * allows grabbing it from cgroup callbacks.
+ */
+static DEFINE_MUTEX(blkcg_pol_register_mutex);
 static DEFINE_MUTEX(blkcg_pol_mutex);
 
 struct blkcg blkcg_root;
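
[Editor's note] The comment above documents the new lock order: blkcg_pol_register_mutex spans the whole (de)registration including cgroup file additions, while blkcg_pol_mutex guards only the policy table, so cgroup callbacks can take the inner lock while files are being added. A hedged pthread sketch of that nesting:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pol_register_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t pol_mutex = PTHREAD_MUTEX_INITIALIZER;
static const char *policy_table[4];

static int policy_register(const char *name)
{
        pthread_mutex_lock(&pol_register_mutex);

        pthread_mutex_lock(&pol_mutex);
        policy_table[0] = name;                 /* table update only */
        pthread_mutex_unlock(&pol_mutex);

        /*
         * cgroup file additions would happen here, outside pol_mutex,
         * so a cgroup callback can grab pol_mutex without deadlocking.
         */
        printf("added cgroup files for %s\n", name);

        pthread_mutex_unlock(&pol_register_mutex);
        return 0;
}

int main(void)
{
        return policy_register("throttle");
}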
@@ -38,6 +46,8 @@ struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
 
 static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
 
+static LIST_HEAD(all_blkcgs);          /* protected by blkcg_pol_mutex */
+
 static bool blkcg_policy_enabled(struct request_queue *q,
                                 const struct blkcg_policy *pol)
 {
@@ -453,20 +463,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
        struct blkcg_gq *blkg;
        int i;
 
-       /*
-        * XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex
-        * which ends up putting cgroup's internal cgroup_tree_mutex under
-        * it; however, cgroup_tree_mutex is nested above cgroup file
-        * active protection and grabbing blkcg_pol_mutex from a cgroup
-        * file operation creates a possible circular dependency.  cgroup
-        * internal locking is planned to go through further simplification
-        * and this issue should go away soon.  For now, let's trylock
-        * blkcg_pol_mutex and restart the write on failure.
-        *
-        * http://lkml.kernel.org/g/5363C04B.4010400@oracle.com
-        */
-       if (!mutex_trylock(&blkcg_pol_mutex))
-               return restart_syscall();
+       mutex_lock(&blkcg_pol_mutex);
        spin_lock_irq(&blkcg->lock);
 
        /*
@@ -721,8 +718,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                return -EINVAL;
 
        disk = get_gendisk(MKDEV(major, minor), &part);
-       if (!disk || part)
+       if (!disk)
+               return -EINVAL;
+       if (part) {
+               put_disk(disk);
                return -EINVAL;
+       }
 
        rcu_read_lock();
        spin_lock_irq(disk->queue->queue_lock);
@@ -822,8 +823,17 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
 {
        struct blkcg *blkcg = css_to_blkcg(css);
 
-       if (blkcg != &blkcg_root)
+       mutex_lock(&blkcg_pol_mutex);
+       list_del(&blkcg->all_blkcgs_node);
+       mutex_unlock(&blkcg_pol_mutex);
+
+       if (blkcg != &blkcg_root) {
+               int i;
+
+               for (i = 0; i < BLKCG_MAX_POLS; i++)
+                       kfree(blkcg->pd[i]);
                kfree(blkcg);
+       }
 }
 
 static struct cgroup_subsys_state *
@@ -833,6 +843,8 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
        struct cgroup_subsys_state *ret;
        int i;
 
+       mutex_lock(&blkcg_pol_mutex);
+
        if (!parent_css) {
                blkcg = &blkcg_root;
                goto done;
@@ -875,14 +887,17 @@ done:
 #ifdef CONFIG_CGROUP_WRITEBACK
        INIT_LIST_HEAD(&blkcg->cgwb_list);
 #endif
+       list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
+
+       mutex_unlock(&blkcg_pol_mutex);
        return &blkcg->css;
 
 free_pd_blkcg:
        for (i--; i >= 0; i--)
                kfree(blkcg->pd[i]);
-
 free_blkcg:
        kfree(blkcg);
+       mutex_unlock(&blkcg_pol_mutex);
        return ret;
 }
 
@@ -1037,10 +1052,8 @@ int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol)
 {
        LIST_HEAD(pds);
-       LIST_HEAD(cpds);
        struct blkcg_gq *blkg;
        struct blkg_policy_data *pd, *nd;
-       struct blkcg_policy_data *cpd, *cnd;
        int cnt = 0, ret;
 
        if (blkcg_policy_enabled(q, pol))
@@ -1053,10 +1066,7 @@ int blkcg_activate_policy(struct request_queue *q,
                cnt++;
        spin_unlock_irq(q->queue_lock);
 
-       /*
-        * Allocate per-blkg and per-blkcg policy data
-        * for all existing blkgs.
-        */
+       /* allocate per-blkg policy data for all existing blkgs */
        while (cnt--) {
                pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
                if (!pd) {
@@ -1064,15 +1074,6 @@ int blkcg_activate_policy(struct request_queue *q,
                        goto out_free;
                }
                list_add_tail(&pd->alloc_node, &pds);
-
-               if (!pol->cpd_size)
-                       continue;
-               cpd = kzalloc_node(pol->cpd_size, GFP_KERNEL, q->node);
-               if (!cpd) {
-                       ret = -ENOMEM;
-                       goto out_free;
-               }
-               list_add_tail(&cpd->alloc_node, &cpds);
        }
 
        /*
@@ -1082,32 +1083,17 @@ int blkcg_activate_policy(struct request_queue *q,
        spin_lock_irq(q->queue_lock);
 
        list_for_each_entry(blkg, &q->blkg_list, q_node) {
-               if (WARN_ON(list_empty(&pds)) ||
-                   WARN_ON(pol->cpd_size && list_empty(&cpds))) {
+               if (WARN_ON(list_empty(&pds))) {
                        /* umm... this shouldn't happen, just abort */
                        ret = -ENOMEM;
                        goto out_unlock;
                }
-               cpd = list_first_entry(&cpds, struct blkcg_policy_data,
-                                      alloc_node);
-               list_del_init(&cpd->alloc_node);
                pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
                list_del_init(&pd->alloc_node);
 
                /* grab blkcg lock too while installing @pd on @blkg */
                spin_lock(&blkg->blkcg->lock);
 
-               if (!pol->cpd_size)
-                       goto no_cpd;
-               if (!blkg->blkcg->pd[pol->plid]) {
-                       /* Per-policy per-blkcg data */
-                       blkg->blkcg->pd[pol->plid] = cpd;
-                       cpd->plid = pol->plid;
-                       pol->cpd_init_fn(blkg->blkcg);
-               } else { /* must free it as it has already been extracted */
-                       kfree(cpd);
-               }
-no_cpd:
                blkg->pd[pol->plid] = pd;
                pd->blkg = blkg;
                pd->plid = pol->plid;
@@ -1124,8 +1110,6 @@ out_free:
        blk_queue_bypass_end(q);
        list_for_each_entry_safe(pd, nd, &pds, alloc_node)
                kfree(pd);
-       list_for_each_entry_safe(cpd, cnd, &cpds, alloc_node)
-               kfree(cpd);
        return ret;
 }
 EXPORT_SYMBOL_GPL(blkcg_activate_policy);
@@ -1162,8 +1146,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
 
                kfree(blkg->pd[pol->plid]);
                blkg->pd[pol->plid] = NULL;
-               kfree(blkg->blkcg->pd[pol->plid]);
-               blkg->blkcg->pd[pol->plid] = NULL;
 
                spin_unlock(&blkg->blkcg->lock);
        }
@@ -1182,11 +1164,13 @@ EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
  */
 int blkcg_policy_register(struct blkcg_policy *pol)
 {
+       struct blkcg *blkcg;
        int i, ret;
 
        if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
                return -EINVAL;
 
+       mutex_lock(&blkcg_pol_register_mutex);
        mutex_lock(&blkcg_pol_mutex);
 
        /* find an empty slot */
@@ -1195,19 +1179,49 @@ int blkcg_policy_register(struct blkcg_policy *pol)
                if (!blkcg_policy[i])
                        break;
        if (i >= BLKCG_MAX_POLS)
-               goto out_unlock;
+               goto err_unlock;
 
-       /* register and update blkgs */
+       /* register @pol */
        pol->plid = i;
-       blkcg_policy[i] = pol;
+       blkcg_policy[pol->plid] = pol;
+
+       /* allocate and install cpd's */
+       if (pol->cpd_size) {
+               list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
+                       struct blkcg_policy_data *cpd;
+
+                       cpd = kzalloc(pol->cpd_size, GFP_KERNEL);
+                       if (!cpd)
+                               goto err_free_cpds;
+
+                       blkcg->pd[pol->plid] = cpd;
+                       cpd->plid = pol->plid;
+                       pol->cpd_init_fn(blkcg);
+               }
+       }
+
+       mutex_unlock(&blkcg_pol_mutex);
 
        /* everything is in place, add intf files for the new policy */
        if (pol->cftypes)
                WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys,
                                                  pol->cftypes));
-       ret = 0;
-out_unlock:
+       mutex_unlock(&blkcg_pol_register_mutex);
+       return 0;
+
+err_free_cpds:
+       if (pol->cpd_size) {
+               list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
+                       kfree(blkcg->pd[pol->plid]);
+                       blkcg->pd[pol->plid] = NULL;
+               }
+       }
+       blkcg_policy[pol->plid] = NULL;
+err_unlock:
        mutex_unlock(&blkcg_pol_mutex);
+       mutex_unlock(&blkcg_pol_register_mutex);
        return ret;
 }
 EXPORT_SYMBOL_GPL(blkcg_policy_register);
@@ -1220,7 +1234,9 @@ EXPORT_SYMBOL_GPL(blkcg_policy_register);
  */
 void blkcg_policy_unregister(struct blkcg_policy *pol)
 {
-       mutex_lock(&blkcg_pol_mutex);
+       struct blkcg *blkcg;
+
+       mutex_lock(&blkcg_pol_register_mutex);
 
        if (WARN_ON(blkcg_policy[pol->plid] != pol))
                goto out_unlock;
@@ -1229,9 +1245,19 @@ void blkcg_policy_unregister(struct blkcg_policy *pol)
        if (pol->cftypes)
                cgroup_rm_cftypes(pol->cftypes);
 
-       /* unregister and update blkgs */
+       /* remove cpds and unregister */
+       mutex_lock(&blkcg_pol_mutex);
+
+       if (pol->cpd_size) {
+               list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
+                       kfree(blkcg->pd[pol->plid]);
+                       blkcg->pd[pol->plid] = NULL;
+               }
+       }
        blkcg_policy[pol->plid] = NULL;
-out_unlock:
+
        mutex_unlock(&blkcg_pol_mutex);
+out_unlock:
+       mutex_unlock(&blkcg_pol_register_mutex);
 }
 EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
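
The blk-cgroup hunks above move per-blkcg policy data (cpd) allocation out of blkcg_activate_policy() and into blkcg_policy_register(), which walks the new all_blkcgs list, and they split the locking in two. A minimal sketch of the resulting lock nesting, using only the mutexes visible in these hunks:

        mutex_lock(&blkcg_pol_register_mutex);  /* outer: serializes (un)register */
        mutex_lock(&blkcg_pol_mutex);           /* inner: protects blkcg_policy[]
                                                 * and the cpds on all_blkcgs */
        /* ... update blkcg_policy[] and walk all_blkcgs ... */
        mutex_unlock(&blkcg_pol_mutex);

        /* cftype add/remove runs with only the outer mutex held */
        mutex_unlock(&blkcg_pol_register_mutex);

The error paths unwind in the same order: the inner mutex is always released before the outer one.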
index 82819e68f58b10c59edaa32c65b5a783306c99ae..627ed0c593fb4c05dd46e1da9eb75ddf2e6c1269 100644 (file)
@@ -3370,7 +3370,7 @@ EXPORT_SYMBOL(blk_post_runtime_resume);
 int __init blk_dev_init(void)
 {
        BUILD_BUG_ON(__REQ_NR_BITS > 8 *
-                       sizeof(((struct request *)0)->cmd_flags));
+                       FIELD_SIZEOF(struct request, cmd_flags));
 
        /* used for unplugging and affects IO latency/throughput - HIGHPRI */
        kblockd_workqueue = alloc_workqueue("kblockd",
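
FIELD_SIZEOF() comes from <linux/kernel.h> and expands to the same null-pointer idiom the old line open-coded, so the assertion is unchanged in meaning:

        #define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

        /* still asserts that cmd_flags has a bit for every request flag */
        BUILD_BUG_ON(__REQ_NR_BITS > 8 * FIELD_SIZEOF(struct request, cmd_flags));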
index f53779692c772a1cc06ec341f9fab307b2ceef91..7d842db59699692dd0ebce5daf6a71443f0e4314 100644 (file)
@@ -1998,7 +1998,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
                goto err_hctxs;
 
        setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
-       blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30000);
+       blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
        q->nr_queues = nr_cpu_ids;
        q->nr_hw_queues = set->nr_hw_queues;
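
blk_queue_rq_timeout() takes a value in jiffies, not milliseconds, so the literal 30000 meant 30 seconds only on HZ=1000 kernels (and 300 seconds on HZ=100). 30 * HZ is 30 seconds for any HZ; a genuine millisecond input would be converted instead:

        blk_queue_rq_timeout(q, 30 * HZ);                  /* 30 s on any HZ */
        blk_queue_rq_timeout(q, msecs_to_jiffies(30000));  /* equivalent */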
index 717afcdb5f4a9657a9ca6f6af9825e109eba9688..88dbbb115285a3ce54ae5c35260692163bae16ca 100644 (file)
@@ -231,7 +231,7 @@ int acpi_device_set_power(struct acpi_device *device, int state)
                dev_warn(&device->dev, "Failed to change power state to %s\n",
                         acpi_power_state_string(state));
        } else {
-               device->power.state = state;
+               device->power.state = target_state;
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Device [%s] transitioned to %s\n",
                                  device->pnp.bus_id,
index 8244f013f21095a9508e80ef01621e0ffbaab106..f1c966e05078426455227d8f582a7ddd0f211cb2 100644 (file)
@@ -193,6 +193,7 @@ static bool acpi_decode_space(struct resource_win *win,
        u8 iodec = attr->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16;
        bool wp = addr->info.mem.write_protect;
        u64 len = attr->address_length;
+       u64 start, end, offset = 0;
        struct resource *res = &win->res;
 
        /*
@@ -204,9 +205,6 @@ static bool acpi_decode_space(struct resource_win *win,
                pr_debug("ACPI: Invalid address space min_addr_fix %d, max_addr_fix %d, len %llx\n",
                         addr->min_address_fixed, addr->max_address_fixed, len);
 
-       res->start = attr->minimum;
-       res->end = attr->maximum;
-
        /*
         * For bridges that translate addresses across the bridge,
         * translation_offset is the offset that must be added to the
@@ -214,12 +212,22 @@ static bool acpi_decode_space(struct resource_win *win,
         * primary side. Non-bridge devices must list 0 for all Address
         * Translation offset bits.
         */
-       if (addr->producer_consumer == ACPI_PRODUCER) {
-               res->start += attr->translation_offset;
-               res->end += attr->translation_offset;
-       } else if (attr->translation_offset) {
+       if (addr->producer_consumer == ACPI_PRODUCER)
+               offset = attr->translation_offset;
+       else if (attr->translation_offset)
                pr_debug("ACPI: translation_offset(%lld) is invalid for non-bridge device.\n",
                         attr->translation_offset);
+       start = attr->minimum + offset;
+       end = attr->maximum + offset;
+
+       win->offset = offset;
+       res->start = start;
+       res->end = end;
+       if (sizeof(resource_size_t) < sizeof(u64) &&
+           (offset != win->offset || start != res->start || end != res->end)) {
+               pr_warn("acpi resource window ([%#llx-%#llx] ignored, not CPU addressable)\n",
+                       attr->minimum, attr->maximum);
+               return false;
        }
 
        switch (addr->resource_type) {
@@ -236,8 +244,6 @@ static bool acpi_decode_space(struct resource_win *win,
                return false;
        }
 
-       win->offset = attr->translation_offset;
-
        if (addr->producer_consumer == ACPI_PRODUCER)
                res->flags |= IORESOURCE_WINDOW;
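
The comparison added above is the whole truncation test: struct resource stores resource_size_t, which is 32 bits on a 32-bit kernel, so a window beyond 4 GiB changes value when assigned. Condensed to the bare idiom:

        u64 start = attr->minimum + offset;     /* full 64-bit address */

        res->start = start;                     /* may narrow to 32 bits */
        if (sizeof(resource_size_t) < sizeof(u64) && res->start != start) {
                /* the value did not survive the round trip; the window
                 * is not CPU addressable on this kernel, so drop it */
                return false;
        }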
 
index e83fc3d0da9c9c60a99a6dec56cc568a97a0a851..db5d9f79a247c5ceb2cb590f206927c22f6f2b7c 100644 (file)
@@ -2478,6 +2478,10 @@ int ata_dev_configure(struct ata_device *dev)
                dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
                                         dev->max_sectors);
 
+       if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
+               dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
+                                        dev->max_sectors);
+
        if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
                dev->max_sectors = ATA_MAX_SECTORS_LBA48;
 
@@ -4146,6 +4150,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "Slimtype DVD A  DS8A8SH", NULL,      ATA_HORKAGE_MAX_SEC_LBA48 },
        { "Slimtype DVD A  DS8A9SH", NULL,      ATA_HORKAGE_MAX_SEC_LBA48 },
 
+       /*
+        * Causes silent data corruption with higher max sects.
+        * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
+        */
+       { "ST380013AS",         "3.20",         ATA_HORKAGE_MAX_SEC_1024 },
+
        /* Devices we expect to fail diagnostics */
 
        /* Devices where NCQ should be avoided */
@@ -4174,9 +4184,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "ST3320[68]13AS",     "SD1[5-9]",     ATA_HORKAGE_NONCQ |
                                                ATA_HORKAGE_FIRMWARE_WARN },
 
-       /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
+       /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
        { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
        { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+       { "VB0250EAVER",        "HPG7",         ATA_HORKAGE_BROKEN_FPDMA_AA },
 
        /* Blacklist entries taken from Silicon Image 3124/3132
           Windows driver .inf file - also several Linux problem reports */
@@ -4229,7 +4240,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Crucial_CT*M500*",           NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
-       { "Micron_M5[15]0*",            "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+       { "Micron_M5[15]0_*",           "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Crucial_CT*M550*",           "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4238,6 +4249,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "Samsung SSD 8*",             NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
 
+       /* devices that don't properly handle TRIM commands */
+       { "SuperSSpeed S238*",          NULL,   ATA_HORKAGE_NOTRIM, },
+
        /*
         * As defined, the DRAT (Deterministic Read After Trim) and RZAT
         * (Return Zero After Trim) flags in the ATA Command Set are
@@ -4501,7 +4515,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
        else /* In the ancient relic department - skip all of this */
                return 0;
 
-       err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+       /* On some disks, this command causes spin-up, so we need a longer timeout */
+       err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
 
        DPRINTK("EXIT, err_mask=%x\n", err_mask);
        return err_mask;
index 7ccc084bf1dfb8f7b979f5e7ae777e030303d1b8..85aa76116a305eb50d77f2544c87f09914998bed 100644 (file)
@@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
                                       ATA_LFLAG_NO_SRST |
                                       ATA_LFLAG_ASSUME_ATA;
                }
+       } else if (vendor == 0x11ab && devid == 0x4140) {
+               /* Marvell 4140 quirks */
+               ata_for_each_link(link, ap, EDGE) {
+                       /* port 4 is for SEMB device and it doesn't like SRST */
+                       if (link->pmp == 4)
+                               link->flags |= ATA_LFLAG_DISABLED;
+               }
        }
 }
 
index 3131adcc1f87e001f7f8bfe317e92527665e4dd4..641a61a59e89c00036af65d3a31fe2cf67eb22b8 100644 (file)
@@ -2568,7 +2568,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
                rbuf[14] = (lowest_aligned >> 8) & 0x3f;
                rbuf[15] = lowest_aligned;
 
-               if (ata_id_has_trim(args->id)) {
+               if (ata_id_has_trim(args->id) &&
+                   !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
                        rbuf[14] |= 0x80; /* LBPME */
 
                        if (ata_id_has_zero_after_trim(args->id) &&
index d6c37bcd416d17145f291136b6e5f2a7192ee404..e2d94972962d69d766e99e8ade594040ae8d9429 100644 (file)
@@ -569,6 +569,8 @@ show_ata_dev_trim(struct device *dev,
 
        if (!ata_id_has_trim(ata_dev->id))
                mode = "unsupported";
+       else if (ata_dev->horkage & ATA_HORKAGE_NOTRIM)
+               mode = "forced_unsupported";
        else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM)
                        mode = "forced_unqueued";
        else if (ata_fpdma_dsm_supported(ata_dev))
index a9b0c820f2ebc7028cb933b7b021e42b3c6a7930..5d9ee99c21481eac524415091e57da0370fa7a64 100644 (file)
@@ -4,7 +4,7 @@
  * Arasan Compact Flash host controller source file
  *
  * Copyright (C) 2011 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -968,7 +968,7 @@ static struct platform_driver arasan_cf_driver = {
 
 module_platform_driver(arasan_cf_driver);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
 MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:" DRIVER_NAME);
index 15f2b2e242ea76b9ed882383f51a976a72d7b325..38f156745d533a666deae90f167dd6d51d718d54 100644 (file)
@@ -34,6 +34,7 @@ int __init bcma_bus_early_register(struct bcma_bus *bus);
 int bcma_bus_suspend(struct bcma_bus *bus);
 int bcma_bus_resume(struct bcma_bus *bus);
 #endif
+struct device *bcma_bus_get_host_dev(struct bcma_bus *bus);
 
 /* scan.c */
 void bcma_detect_chip(struct bcma_bus *bus);
index 9635f1033ce5c46e7aba2863fa04a8bc86421aa9..24882c18fcbe6911c7266aa4dcf42903ffb7dbef 100644 (file)
@@ -7,11 +7,14 @@
 
 #include "bcma_private.h"
 #include <linux/module.h>
+#include <linux/mmc/sdio_func.h>
 #include <linux/platform_device.h>
+#include <linux/pci.h>
 #include <linux/bcma/bcma.h>
 #include <linux/slab.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/of_platform.h>
 
 MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
 MODULE_LICENSE("GPL");
@@ -268,6 +271,28 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
        }
 }
 
+struct device *bcma_bus_get_host_dev(struct bcma_bus *bus)
+{
+       switch (bus->hosttype) {
+       case BCMA_HOSTTYPE_PCI:
+               if (bus->host_pci)
+                       return &bus->host_pci->dev;
+               else
+                       return NULL;
+       case BCMA_HOSTTYPE_SOC:
+               if (bus->host_pdev)
+                       return &bus->host_pdev->dev;
+               else
+                       return NULL;
+       case BCMA_HOSTTYPE_SDIO:
+               if (bus->host_sdio)
+                       return &bus->host_sdio->dev;
+               else
+                       return NULL;
+       }
+       return NULL;
+}
+
 void bcma_init_bus(struct bcma_bus *bus)
 {
        mutex_lock(&bcma_buses_mutex);
@@ -387,6 +412,7 @@ int bcma_bus_register(struct bcma_bus *bus)
 {
        int err;
        struct bcma_device *core;
+       struct device *dev;
 
        /* Scan for devices (cores) */
        err = bcma_bus_scan(bus);
@@ -409,6 +435,16 @@ int bcma_bus_register(struct bcma_bus *bus)
                bcma_core_pci_early_init(&bus->drv_pci[0]);
        }
 
+       dev = bcma_bus_get_host_dev(bus);
+       /* TODO: remove the IS_BUILTIN(CONFIG_BCMA) check when
+        * of_default_bus_match_table is exported or in some other way
+        * accessible. This is just a temporary workaround.
+        */
+       if (IS_BUILTIN(CONFIG_BCMA) && dev) {
+               of_platform_populate(dev->of_node, of_default_bus_match_table,
+                                    NULL, dev);
+       }
+
        /* Cores providing flash access go before SPROM init */
        list_for_each_entry(core, &bus->cores, list) {
                if (bcma_is_core_needed_early(core->id.id))
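
bcma_bus_get_host_dev() gives the rest of the driver one way to reach the struct device behind any of the three host types; the registration hunk above uses it to turn the host's DT children into platform devices:

        struct device *dev = bcma_bus_get_host_dev(bus);

        /* dev->of_node is the host's DT node; of_platform_populate()
         * creates a platform device for each child node under it */
        if (dev)
                of_platform_populate(dev->of_node, of_default_bus_match_table,
                                     NULL, dev);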
index 69de41a87b74311b2b7478fb0226b8bc253c6ebc..3177b245d2bdf63e821a12a4c0f18cbab1b16229 100644 (file)
@@ -240,19 +240,19 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
        while ((entry = llist_del_all(&cq->list)) != NULL) {
                entry = llist_reverse_order(entry);
                do {
+                       struct request_queue *q = NULL;
+
                        cmd = container_of(entry, struct nullb_cmd, ll_list);
                        entry = entry->next;
+                       if (cmd->rq)
+                               q = cmd->rq->q;
                        end_cmd(cmd);
 
-                       if (cmd->rq) {
-                               struct request_queue *q = cmd->rq->q;
-
-                               if (!q->mq_ops && blk_queue_stopped(q)) {
-                                       spin_lock(q->queue_lock);
-                                       if (blk_queue_stopped(q))
-                                               blk_start_queue(q);
-                                       spin_unlock(q->queue_lock);
-                               }
+                       if (q && !q->mq_ops && blk_queue_stopped(q)) {
+                               spin_lock(q->queue_lock);
+                               if (blk_queue_stopped(q))
+                                       blk_start_queue(q);
+                               spin_unlock(q->queue_lock);
                        }
                } while (entry);
        }
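
The null_blk rework above is effectively a use-after-free fix: end_cmd() completes, and may free, the request, so cmd->rq must not be dereferenced afterwards. Hence the queue pointer is captured first:

        if (cmd->rq)
                q = cmd->rq->q;         /* read before completion */
        end_cmd(cmd);                   /* cmd->rq may be gone after this */
        /* only the saved q is used from here on */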
index d1d6141920d3ced742d850b051d273d65aa3728e..7920c2741b47d436b2ef37e99019f33938f2db04 100644 (file)
@@ -2108,8 +2108,17 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
                goto out_free_disk;
 
        add_disk(ns->disk);
-       if (ns->ms)
-               revalidate_disk(ns->disk);
+       if (ns->ms) {
+               struct block_device *bd = bdget_disk(ns->disk, 0);
+               if (!bd)
+                       return;
+               if (blkdev_get(bd, FMODE_READ, NULL)) {
+                       bdput(bd);
+                       return;
+               }
+               blkdev_reread_part(bd);
+               blkdev_put(bd, FMODE_READ);
+       }
        return;
  out_free_disk:
        kfree(disk);
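
Where the old code called revalidate_disk() for namespaces with metadata, the new code forces an explicit partition rescan by briefly opening the whole-disk block device. A sketch of the idiom (the hunk above additionally drops the bdget_disk() reference with bdput() when blkdev_get() fails):

        struct block_device *bd = bdget_disk(ns->disk, 0);     /* whole disk */

        if (bd && blkdev_get(bd, FMODE_READ, NULL) == 0) {
                blkdev_reread_part(bd);         /* BLKRRPART's worker */
                blkdev_put(bd, FMODE_READ);
        }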
index 2e777071e1dcb8bd544a16aedd58ca28ad2f43bc..79e8234b1aa5995eaf8b8e25d729e6da193518fe 100644 (file)
@@ -132,6 +132,7 @@ config BT_HCIUART_3WIRE
 config BT_HCIUART_INTEL
        bool "Intel protocol support"
        depends on BT_HCIUART
+       select BT_HCIUART_H4
        select BT_INTEL
        help
          The Intel protocol support enables Bluetooth HCI over serial
index fcfb72e9e0ee5948bf7e4c73e8d1b056d0723dbf..a5c4d0584389713522652c472cfd5c7b5e75dbab 100644 (file)
@@ -492,7 +492,7 @@ static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
        case HCI_SCODATA_PKT:
                hdev->stat.sco_tx++;
                break;
-       };
+       }
 
        /* Prepend skb with frame type */
        memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
index 7aab65427d388fc4c653223915ab033904032758..a00bb82eb7c6d8322f835c8ce5689eae784bc5f2 100644 (file)
@@ -427,7 +427,7 @@ static int bt3c_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
        case HCI_SCODATA_PKT:
                hdev->stat.sco_tx++;
                break;
-       };
+       }
 
        /* Prepend skb with frame type */
        memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
index 1e1a4323a71fd304d410e421b488758d2300b34d..02ed816a18f9a2b652bbd0d179d150f50cad0294 100644 (file)
@@ -34,6 +34,7 @@
 
 #define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}})
 #define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}})
+#define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}})
 
 int btbcm_check_bdaddr(struct hci_dev *hdev)
 {
@@ -66,9 +67,13 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
         *
         * The address 43:24:B3:00:00:00 indicates a BCM4324B3 controller
         * with waiting for configuration state.
+        *
+        * The address 43:30:B1:00:00:00 indicates a BCM4330B1 controller
+        * with waiting for configuration state.
         */
        if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0) ||
-           !bacmp(&bda->bdaddr, BDADDR_BCM4324B3)) {
+           !bacmp(&bda->bdaddr, BDADDR_BCM4324B3) ||
+           !bacmp(&bda->bdaddr, BDADDR_BCM4330B1)) {
                BT_INFO("%s: BCM: Using default device address (%pMR)",
                        hdev->name, &bda->bdaddr);
                set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
@@ -241,6 +246,7 @@ static const struct {
        u16 subver;
        const char *name;
 } bcm_uart_subver_table[] = {
+       { 0x4103, "BCM4330B1"   },      /* 002.001.003 */
        { 0x410e, "BCM43341B0"  },      /* 002.001.014 */
        { 0x4406, "BCM4324B3"   },      /* 002.004.006 */
        { 0x610c, "BCM4354"     },      /* 003.001.012 */
@@ -472,12 +478,11 @@ int btbcm_setup_apple(struct hci_dev *hdev)
 
        /* Read Verbose Config Version Info */
        skb = btbcm_read_verbose_config(hdev);
-       if (IS_ERR(skb))
-               return PTR_ERR(skb);
-
-       BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
-               get_unaligned_le16(skb->data + 5));
-       kfree_skb(skb);
+       if (!IS_ERR(skb)) {
+               BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
+                       get_unaligned_le16(skb->data + 5));
+               kfree_skb(skb);
+       }
 
        set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
 
index 828f2f8d1568c8c50962dee7d8e77fcfcd669972..1ce4ac16c7facdd1387119619317ae8c3fcea5bc 100644 (file)
@@ -89,6 +89,86 @@ int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
 }
 EXPORT_SYMBOL_GPL(btintel_set_bdaddr);
 
+void btintel_hw_error(struct hci_dev *hdev, u8 code)
+{
+       struct sk_buff *skb;
+       u8 type = 0x00;
+
+       BT_ERR("%s: Hardware error 0x%2.2x", hdev->name, code);
+
+       skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: Reset after hardware error failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return;
+       }
+       kfree_skb(skb);
+
+       skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: Retrieving Intel exception info failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return;
+       }
+
+       if (skb->len != 13) {
+               BT_ERR("%s: Exception info size mismatch", hdev->name);
+               kfree_skb(skb);
+               return;
+       }
+
+       BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1));
+
+       kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(btintel_hw_error);
+
+void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
+{
+       const char *variant;
+
+       switch (ver->fw_variant) {
+       case 0x06:
+               variant = "Bootloader";
+               break;
+       case 0x23:
+               variant = "Firmware";
+               break;
+       default:
+               return;
+       }
+
+       BT_INFO("%s: %s revision %u.%u build %u week %u %u", hdev->name,
+               variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
+               ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy);
+}
+EXPORT_SYMBOL_GPL(btintel_version_info);
+
+int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
+                       const void *param)
+{
+       while (plen > 0) {
+               struct sk_buff *skb;
+               u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen;
+
+               cmd_param[0] = fragment_type;
+               memcpy(cmd_param + 1, param, fragment_len);
+
+               skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1,
+                                    cmd_param, HCI_INIT_TIMEOUT);
+               if (IS_ERR(skb))
+                       return PTR_ERR(skb);
+
+               kfree_skb(skb);
+
+               plen -= fragment_len;
+               param += fragment_len;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_secure_send);
+
 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
 MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
 MODULE_VERSION(VERSION);
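
Two of the relocated helpers are easiest to read with concrete numbers. btintel_secure_send() splits its payload into 252-byte chunks because vendor command 0xfc09 carries one fragment-type byte plus at most 252 bytes of data (hence the 253-byte cmd_param buffer); for plen = 600 the loop runs three times:

        /* pass 1: fragment_len = 252, plen 600 -> 348 */
        /* pass 2: fragment_len = 252, plen 348 ->  96 */
        /* pass 3: fragment_len =  96, plen  96 ->   0, done */

And btintel_version_info() unpacks the revision nibbles; hypothetical values fw_variant = 0x23, fw_revision = 0x25, fw_build_num = 79, fw_build_ww = 22 and fw_build_yy = 15 would log:

        hci0: Firmware revision 2.5 build 79 week 22 2015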
index 4bda6ab34f60292da156b79314fcac9284aa4193..b278d14758d592896db033c78af53b6565a6c88f 100644 (file)
@@ -73,6 +73,11 @@ struct intel_secure_send_result {
 
 int btintel_check_bdaddr(struct hci_dev *hdev);
 int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+void btintel_hw_error(struct hci_dev *hdev, u8 code);
+
+void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver);
+int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
+                       const void *param);
 
 #else
 
@@ -86,4 +91,18 @@ static inline int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdadd
        return -EOPNOTSUPP;
 }
 
+static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
+{
+}
+
+static inline void btintel_version_info(struct hci_dev *hdev,
+                                       struct intel_version *ver)
+{
+}
+
+static inline int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type,
+                                     u32 plen, const void *param)
+{
+       return -EOPNOTSUPP;
+}
+
 #endif
index 086f0ec89580627d4c95516c0ef912265445afef..27a9aac2558326c8f7801b891ef4b5f7441b1071 100644 (file)
@@ -95,10 +95,10 @@ struct btmrvl_private {
        struct btmrvl_device btmrvl_dev;
        struct btmrvl_adapter *adapter;
        struct btmrvl_thread main_thread;
-       int (*hw_host_to_card) (struct btmrvl_private *priv,
+       int (*hw_host_to_card)(struct btmrvl_private *priv,
                                u8 *payload, u16 nb);
-       int (*hw_wakeup_firmware) (struct btmrvl_private *priv);
-       int (*hw_process_int_status) (struct btmrvl_private *priv);
+       int (*hw_wakeup_firmware)(struct btmrvl_private *priv);
+       int (*hw_process_int_status)(struct btmrvl_private *priv);
        void (*firmware_dump)(struct btmrvl_private *priv);
        spinlock_t driver_lock;         /* spinlock used by driver */
 #ifdef CONFIG_DEBUG_FS
index b4cf8d9c9dac29893241cb9b814879969ed0906e..cc92b0f84a5168e139435737cef2c63ab1ee68e6 100644 (file)
@@ -68,6 +68,9 @@ static const struct usb_device_id btusb_table[] = {
        /* Generic Bluetooth AMP device */
        { USB_DEVICE_INFO(0xe0, 0x01, 0x04), .driver_info = BTUSB_AMP },
 
+       /* Generic Bluetooth USB interface */
+       { USB_INTERFACE_INFO(0xe0, 0x01, 0x01) },
+
        /* Apple-specific (Broadcom) devices */
        { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01),
          .driver_info = BTUSB_BCM_APPLE },
@@ -1878,51 +1881,6 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
        return -EILSEQ;
 }
 
-static int btusb_intel_secure_send(struct hci_dev *hdev, u8 fragment_type,
-                                  u32 plen, const void *param)
-{
-       while (plen > 0) {
-               struct sk_buff *skb;
-               u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen;
-
-               cmd_param[0] = fragment_type;
-               memcpy(cmd_param + 1, param, fragment_len);
-
-               skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1,
-                                    cmd_param, HCI_INIT_TIMEOUT);
-               if (IS_ERR(skb))
-                       return PTR_ERR(skb);
-
-               kfree_skb(skb);
-
-               plen -= fragment_len;
-               param += fragment_len;
-       }
-
-       return 0;
-}
-
-static void btusb_intel_version_info(struct hci_dev *hdev,
-                                    struct intel_version *ver)
-{
-       const char *variant;
-
-       switch (ver->fw_variant) {
-       case 0x06:
-               variant = "Bootloader";
-               break;
-       case 0x23:
-               variant = "Firmware";
-               break;
-       default:
-               return;
-       }
-
-       BT_INFO("%s: %s revision %u.%u build %u week %u %u", hdev->name,
-               variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
-               ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy);
-}
-
 static int btusb_setup_intel_new(struct hci_dev *hdev)
 {
        static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
@@ -1984,7 +1942,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
                return -EINVAL;
        }
 
-       btusb_intel_version_info(hdev, ver);
+       btintel_version_info(hdev, ver);
 
        /* The firmware variant determines if the device is in bootloader
         * mode or is running operational firmware. The value 0x06 identifies
@@ -2104,7 +2062,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
        /* Start the firmware download transaction with the Init fragment
         * represented by the 128 bytes of CSS header.
         */
-       err = btusb_intel_secure_send(hdev, 0x00, 128, fw->data);
+       err = btintel_secure_send(hdev, 0x00, 128, fw->data);
        if (err < 0) {
                BT_ERR("%s: Failed to send firmware header (%d)",
                       hdev->name, err);
@@ -2114,7 +2072,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
        /* Send the 256 bytes of public key information from the firmware
         * as the PKey fragment.
         */
-       err = btusb_intel_secure_send(hdev, 0x03, 256, fw->data + 128);
+       err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
        if (err < 0) {
                BT_ERR("%s: Failed to send firmware public key (%d)",
                       hdev->name, err);
@@ -2124,7 +2082,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
        /* Send the 256 bytes of signature information from the firmware
         * as the Sign fragment.
         */
-       err = btusb_intel_secure_send(hdev, 0x02, 256, fw->data + 388);
+       err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
        if (err < 0) {
                BT_ERR("%s: Failed to send firmware signature (%d)",
                       hdev->name, err);
@@ -2148,8 +2106,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
                 * firmware data buffer as a single Data fragment.
                 */
                if (!(frag_len % 4)) {
-                       err = btusb_intel_secure_send(hdev, 0x01, frag_len,
-                                                     fw_ptr);
+                       err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
                        if (err < 0) {
                                BT_ERR("%s: Failed to send firmware data (%d)",
                                       hdev->name, err);
@@ -2291,39 +2248,6 @@ done:
        return 0;
 }
 
-static void btusb_hw_error_intel(struct hci_dev *hdev, u8 code)
-{
-       struct sk_buff *skb;
-       u8 type = 0x00;
-
-       BT_ERR("%s: Hardware error 0x%2.2x", hdev->name, code);
-
-       skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
-       if (IS_ERR(skb)) {
-               BT_ERR("%s: Reset after hardware error failed (%ld)",
-                      hdev->name, PTR_ERR(skb));
-               return;
-       }
-       kfree_skb(skb);
-
-       skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
-       if (IS_ERR(skb)) {
-               BT_ERR("%s: Retrieving Intel exception info failed (%ld)",
-                      hdev->name, PTR_ERR(skb));
-               return;
-       }
-
-       if (skb->len != 13) {
-               BT_ERR("%s: Exception info size mismatch", hdev->name);
-               kfree_skb(skb);
-               return;
-       }
-
-       BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1));
-
-       kfree_skb(skb);
-}
-
 static int btusb_shutdown_intel(struct hci_dev *hdev)
 {
        struct sk_buff *skb;
@@ -2783,7 +2707,7 @@ static int btusb_probe(struct usb_interface *intf,
        if (id->driver_info & BTUSB_INTEL_NEW) {
                hdev->send = btusb_send_frame_intel;
                hdev->setup = btusb_setup_intel_new;
-               hdev->hw_error = btusb_hw_error_intel;
+               hdev->hw_error = btintel_hw_error;
                hdev->set_bdaddr = btintel_set_bdaddr;
                set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
        }
index 78e10f0c65b28dc38d30336514b92614bc7e449c..84135c54ed2e46df5111e2c2797b82fa8db4b143 100644 (file)
@@ -182,9 +182,9 @@ static void dtl1_control(struct dtl1_info *info, struct sk_buff *skb)
        int i;
 
        printk(KERN_INFO "Bluetooth: Nokia control data =");
-       for (i = 0; i < skb->len; i++) {
+       for (i = 0; i < skb->len; i++)
                printk(" %02x", skb->data[i]);
-       }
+
        printk("\n");
 
        /* transition to active state */
@@ -406,7 +406,7 @@ static int dtl1_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
                break;
        default:
                return -EILSEQ;
-       };
+       }
 
        nsh.zero = 0;
        nsh.len = skb->len;
index 3455cecc9ecfe630c3331d67d22715ddb60cfcf8..b35b238a0380197fda6ee096e388a6fc09aa11a7 100644 (file)
@@ -75,7 +75,7 @@ struct h5 {
        size_t                  rx_pending;     /* Expecting more bytes */
        u8                      rx_ack;         /* Last ack number received */
 
-       int                     (*rx_func) (struct hci_uart *hu, u8 c);
+       int                     (*rx_func)(struct hci_uart *hu, u8 c);
 
        struct timer_list       timer;          /* Retransmission timer */
 
index 5dd07bf052360c15bc8631551def5e6cce687626..21dfa89751dfe6b790704d223d770d229ddf2c02 100644 (file)
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/skbuff.h>
+#include <linux/firmware.h>
+#include <linux/wait.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
 #include "hci_uart.h"
+#include "btintel.h"
+
+#define STATE_BOOTLOADER       0
+#define STATE_DOWNLOADING      1
+#define STATE_FIRMWARE_LOADED  2
+#define STATE_FIRMWARE_FAILED  3
+#define STATE_BOOTING          4
+
+struct intel_data {
+       struct sk_buff *rx_skb;
+       struct sk_buff_head txq;
+       unsigned long flags;
+};
+
+static int intel_open(struct hci_uart *hu)
+{
+       struct intel_data *intel;
+
+       BT_DBG("hu %p", hu);
+
+       intel = kzalloc(sizeof(*intel), GFP_KERNEL);
+       if (!intel)
+               return -ENOMEM;
+
+       skb_queue_head_init(&intel->txq);
+
+       hu->priv = intel;
+       return 0;
+}
+
+static int intel_close(struct hci_uart *hu)
+{
+       struct intel_data *intel = hu->priv;
+
+       BT_DBG("hu %p", hu);
+
+       skb_queue_purge(&intel->txq);
+       kfree_skb(intel->rx_skb);
+       kfree(intel);
+
+       hu->priv = NULL;
+       return 0;
+}
+
+static int intel_flush(struct hci_uart *hu)
+{
+       struct intel_data *intel = hu->priv;
+
+       BT_DBG("hu %p", hu);
+
+       skb_queue_purge(&intel->txq);
+
+       return 0;
+}
+
+static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
+{
+       struct sk_buff *skb;
+       struct hci_event_hdr *hdr;
+       struct hci_ev_cmd_complete *evt;
+
+       skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC);
+       if (!skb)
+               return -ENOMEM;
+
+       hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
+       hdr->evt = HCI_EV_CMD_COMPLETE;
+       hdr->plen = sizeof(*evt) + 1;
+
+       evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
+       evt->ncmd = 0x01;
+       evt->opcode = cpu_to_le16(opcode);
+
+       *skb_put(skb, 1) = 0x00;
+
+       bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
+
+       return hci_recv_frame(hdev, skb);
+}
+
+static int intel_setup(struct hci_uart *hu)
+{
+       static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
+                                         0x00, 0x08, 0x04, 0x00 };
+       struct intel_data *intel = hu->priv;
+       struct hci_dev *hdev = hu->hdev;
+       struct sk_buff *skb;
+       struct intel_version *ver;
+       struct intel_boot_params *params;
+       const struct firmware *fw;
+       const u8 *fw_ptr;
+       char fwname[64];
+       u32 frag_len;
+       ktime_t calltime, delta, rettime;
+       unsigned long long duration;
+       int err;
+
+       BT_DBG("%s", hdev->name);
+
+       hu->hdev->set_bdaddr = btintel_set_bdaddr;
+
+       calltime = ktime_get();
+
+       set_bit(STATE_BOOTLOADER, &intel->flags);
+
+       /* Read the Intel version information to determine if the device
+        * is in bootloader mode or if it already has operational firmware
+        * loaded.
+        */
+       skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: Reading Intel version information failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return PTR_ERR(skb);
+       }
+
+       if (skb->len != sizeof(*ver)) {
+               BT_ERR("%s: Intel version event size mismatch", hdev->name);
+               kfree_skb(skb);
+               return -EILSEQ;
+       }
+
+       ver = (struct intel_version *)skb->data;
+       if (ver->status) {
+               BT_ERR("%s: Intel version command failure (%02x)",
+                      hdev->name, ver->status);
+               err = -bt_to_errno(ver->status);
+               kfree_skb(skb);
+               return err;
+       }
+
+       /* The hardware platform number has a fixed value of 0x37 and
+        * for now only this single value is accepted.
+        */
+       if (ver->hw_platform != 0x37) {
+               BT_ERR("%s: Unsupported Intel hardware platform (%u)",
+                      hdev->name, ver->hw_platform);
+               kfree_skb(skb);
+               return -EINVAL;
+       }
+
+       /* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is
+        * supported by this firmware loading method. This check has been
+        * put in place to ensure correct forward compatibility options
+        * when newer hardware variants come along.
+        */
+       if (ver->hw_variant != 0x0b) {
+               BT_ERR("%s: Unsupported Intel hardware variant (%u)",
+                      hdev->name, ver->hw_variant);
+               kfree_skb(skb);
+               return -EINVAL;
+       }
+
+       btintel_version_info(hdev, ver);
+
+       /* The firmware variant determines if the device is in bootloader
+        * mode or is running operational firmware. The value 0x06 identifies
+        * the bootloader and the value 0x23 identifies the operational
+        * firmware.
+        *
+        * When the operational firmware is already present, then only
+        * the check for valid Bluetooth device address is needed. This
+        * determines if the device will be added as configured or
+        * unconfigured controller.
+        *
+        * It is not possible to use the Secure Boot Parameters in this
+        * case since that command is only available in bootloader mode.
+        */
+       if (ver->fw_variant == 0x23) {
+               kfree_skb(skb);
+               clear_bit(STATE_BOOTLOADER, &intel->flags);
+               btintel_check_bdaddr(hdev);
+               return 0;
+       }
+
+       /* If the device is not in bootloader mode, then the only possible
+        * choice is to return an error and abort the device initialization.
+        */
+       if (ver->fw_variant != 0x06) {
+               BT_ERR("%s: Unsupported Intel firmware variant (%u)",
+                      hdev->name, ver->fw_variant);
+               kfree_skb(skb);
+               return -ENODEV;
+       }
+
+       kfree_skb(skb);
+
+       /* Read the secure boot parameters to identify the operating
+        * details of the bootloader.
+        */
+       skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: Reading Intel boot parameters failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return PTR_ERR(skb);
+       }
+
+       if (skb->len != sizeof(*params)) {
+               BT_ERR("%s: Intel boot parameters size mismatch", hdev->name);
+               kfree_skb(skb);
+               return -EILSEQ;
+       }
+
+       params = (struct intel_boot_params *)skb->data;
+       if (params->status) {
+               BT_ERR("%s: Intel boot parameters command failure (%02x)",
+                      hdev->name, params->status);
+               err = -bt_to_errno(params->status);
+               kfree_skb(skb);
+               return err;
+       }
+
+       BT_INFO("%s: Device revision is %u", hdev->name,
+               le16_to_cpu(params->dev_revid));
+
+       BT_INFO("%s: Secure boot is %s", hdev->name,
+               params->secure_boot ? "enabled" : "disabled");
+
+       BT_INFO("%s: Minimum firmware build %u week %u %u", hdev->name,
+               params->min_fw_build_nn, params->min_fw_build_cw,
+               2000 + params->min_fw_build_yy);
+
+       /* It is required that every single firmware fragment is acknowledged
+        * with a command complete event. If the boot parameters indicate
+        * that this bootloader does not send them, then abort the setup.
+        */
+       if (params->limited_cce != 0x00) {
+               BT_ERR("%s: Unsupported Intel firmware loading method (%u)",
+                      hdev->name, params->limited_cce);
+               kfree_skb(skb);
+               return -EINVAL;
+       }
+
+       /* If the OTP has no valid Bluetooth device address, then there will
+        * also be no valid address for the operational firmware.
+        */
+       if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
+               BT_INFO("%s: No device address configured", hdev->name);
+               set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+       }
+
+       /* With this Intel bootloader only the hardware variant and device
+        * revision information are used to select the right firmware.
+        *
+        * Currently this bootloader support is limited to hardware variant
+        * iBT 3.0 (LnP/SfP) which is identified by the value 11 (0x0b).
+        */
+       snprintf(fwname, sizeof(fwname), "intel/ibt-11-%u.sfi",
+                le16_to_cpu(params->dev_revid));
+
+       err = request_firmware(&fw, fwname, &hdev->dev);
+       if (err < 0) {
+               BT_ERR("%s: Failed to load Intel firmware file (%d)",
+                      hdev->name, err);
+               kfree_skb(skb);
+               return err;
+       }
+
+       BT_INFO("%s: Found device firmware: %s", hdev->name, fwname);
+
+       kfree_skb(skb);
+
+       if (fw->size < 644) {
+               BT_ERR("%s: Invalid size of firmware file (%zu)",
+                      hdev->name, fw->size);
+               err = -EBADF;
+               goto done;
+       }
+
+       set_bit(STATE_DOWNLOADING, &intel->flags);
+
+       /* Start the firmware download transaction with the Init fragment
+        * represented by the 128 bytes of CSS header.
+        */
+       err = btintel_secure_send(hdev, 0x00, 128, fw->data);
+       if (err < 0) {
+               BT_ERR("%s: Failed to send firmware header (%d)",
+                      hdev->name, err);
+               goto done;
+       }
+
+       /* Send the 256 bytes of public key information from the firmware
+        * as the PKey fragment.
+        */
+       err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
+       if (err < 0) {
+               BT_ERR("%s: Failed to send firmware public key (%d)",
+                      hdev->name, err);
+               goto done;
+       }
+
+       /* Send the 256 bytes of signature information from the firmware
+        * as the Sign fragment.
+        */
+       err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
+       if (err < 0) {
+               BT_ERR("%s: Failed to send firmware signature (%d)",
+                      hdev->name, err);
+               goto done;
+       }
+
+       fw_ptr = fw->data + 644;
+       frag_len = 0;
+
+       while (fw_ptr - fw->data < fw->size) {
+               struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
+
+               frag_len += sizeof(*cmd) + cmd->plen;
+
+               BT_DBG("%s: patching %td/%zu", hdev->name,
+                      (fw_ptr - fw->data), fw->size);
+
+               /* The parameter length of the secure send command requires
+                * a 4 byte alignment. Conveniently, the firmware file contains
+                * Intel_NOP commands that pad the fragments to this alignment
+                * as needed.
+                *
+                * Send a set of commands with 4 byte alignment from the
+                * firmware data buffer as a single Data fragment.
+                */
+               if (frag_len % 4)
+                       continue;
+
+               /* Send each command from the firmware data buffer as
+                * a single Data fragment.
+                */
+               err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
+               if (err < 0) {
+                       BT_ERR("%s: Failed to send firmware data (%d)",
+                              hdev->name, err);
+                       goto done;
+               }
+
+               fw_ptr += frag_len;
+               frag_len = 0;
+       }
+
+       set_bit(STATE_FIRMWARE_LOADED, &intel->flags);
+
+       BT_INFO("%s: Waiting for firmware download to complete", hdev->name);
+
+       /* Before switching the device into operational mode and with that
+        * booting the loaded firmware, wait for the bootloader notification
+        * that all fragments have been successfully received.
+        *
+        * When the event processing receives the notification, then the
+        * STATE_DOWNLOADING flag will be cleared.
+        *
+        * The firmware loading should not take longer than 5 seconds,
+        * so if that limit is exceeded, time out and fail the setup
+        * of this device.
+        */
+       err = wait_on_bit_timeout(&intel->flags, STATE_DOWNLOADING,
+                                 TASK_INTERRUPTIBLE,
+                                 msecs_to_jiffies(5000));
+       if (err == 1) {
+               BT_ERR("%s: Firmware loading interrupted", hdev->name);
+               err = -EINTR;
+               goto done;
+       }
+
+       if (err) {
+               BT_ERR("%s: Firmware loading timeout", hdev->name);
+               err = -ETIMEDOUT;
+               goto done;
+       }
+
+       if (test_bit(STATE_FIRMWARE_FAILED, &intel->flags)) {
+               BT_ERR("%s: Firmware loading failed", hdev->name);
+               err = -ENOEXEC;
+               goto done;
+       }
+
+       rettime = ktime_get();
+       delta = ktime_sub(rettime, calltime);
+       duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+       BT_INFO("%s: Firmware loaded in %llu usecs", hdev->name, duration);
+
+done:
+       release_firmware(fw);
+
+       if (err < 0)
+               return err;
+
+       calltime = ktime_get();
+
+       set_bit(STATE_BOOTING, &intel->flags);
+
+       skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(reset_param), reset_param,
+                            HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       kfree_skb(skb);
+
+       /* The bootloader will not indicate when the device is ready. This
+        * is done by the operational firmware sending bootup notification.
+        *
+        * Booting into operational firmware should not take longer than
+        * 1 second. If it takes longer, just fail the setup since
+        * something went wrong.
+        */
+       BT_INFO("%s: Waiting for device to boot", hdev->name);
+
+       err = wait_on_bit_timeout(&intel->flags, STATE_BOOTING,
+                                 TASK_INTERRUPTIBLE,
+                                 msecs_to_jiffies(1000));
+
+       if (err == 1) {
+               BT_ERR("%s: Device boot interrupted", hdev->name);
+               return -EINTR;
+       }
+
+       if (err) {
+               BT_ERR("%s: Device boot timeout", hdev->name);
+               return -ETIMEDOUT;
+       }
+
+       rettime = ktime_get();
+       delta = ktime_sub(rettime, calltime);
+       duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+       BT_INFO("%s: Device booted in %llu usecs", hdev->name, duration);
+
+       clear_bit(STATE_BOOTLOADER, &intel->flags);
+
+       return 0;
+}
+
+static int intel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_uart *hu = hci_get_drvdata(hdev);
+       struct intel_data *intel = hu->priv;
+       struct hci_event_hdr *hdr;
+
+       if (!test_bit(STATE_BOOTLOADER, &intel->flags))
+               goto recv;
+
+       hdr = (void *)skb->data;
+
+       /* When the firmware loading completes the device sends
+        * out a vendor specific event indicating the result of
+        * the firmware loading.
+        */
+       if (skb->len == 7 && hdr->evt == 0xff && hdr->plen == 0x05 &&
+           skb->data[2] == 0x06) {
+               if (skb->data[3] != 0x00)
+                       set_bit(STATE_FIRMWARE_FAILED, &intel->flags);
+
+               if (test_and_clear_bit(STATE_DOWNLOADING, &intel->flags) &&
+                   test_bit(STATE_FIRMWARE_LOADED, &intel->flags)) {
+                       smp_mb__after_atomic();
+                       wake_up_bit(&intel->flags, STATE_DOWNLOADING);
+               }
+
+       /* When switching to the operational firmware the device
+        * sends a vendor specific event indicating that the bootup
+        * completed.
+        */
+       } else if (skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 &&
+                  skb->data[2] == 0x02) {
+               if (test_and_clear_bit(STATE_BOOTING, &intel->flags)) {
+                       smp_mb__after_atomic();
+                       wake_up_bit(&intel->flags, STATE_BOOTING);
+               }
+       }
+recv:
+       return hci_recv_frame(hdev, skb);
+}
+
+static const struct h4_recv_pkt intel_recv_pkts[] = {
+       { H4_RECV_ACL,   .recv = hci_recv_frame },
+       { H4_RECV_SCO,   .recv = hci_recv_frame },
+       { H4_RECV_EVENT, .recv = intel_recv_event },
+};
+
+static int intel_recv(struct hci_uart *hu, const void *data, int count)
+{
+       struct intel_data *intel = hu->priv;
+
+       if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+               return -EUNATCH;
+
+       intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count,
+                                   intel_recv_pkts,
+                                   ARRAY_SIZE(intel_recv_pkts));
+       if (IS_ERR(intel->rx_skb)) {
+               int err = PTR_ERR(intel->rx_skb);
+               BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+               intel->rx_skb = NULL;
+               return err;
+       }
+
+       return count;
+}
+
+static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+       struct intel_data *intel = hu->priv;
+
+       BT_DBG("hu %p skb %p", hu, skb);
+
+       skb_queue_tail(&intel->txq, skb);
+
+       return 0;
+}
+
+static struct sk_buff *intel_dequeue(struct hci_uart *hu)
+{
+       struct intel_data *intel = hu->priv;
+       struct sk_buff *skb;
+
+       skb = skb_dequeue(&intel->txq);
+       if (!skb)
+               return skb;
+
+       if (test_bit(STATE_BOOTLOADER, &intel->flags) &&
+           (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT)) {
+               struct hci_command_hdr *cmd = (void *)skb->data;
+               __u16 opcode = le16_to_cpu(cmd->opcode);
+
+               /* When the 0xfc01 command is issued to boot into
+                * the operational firmware, it will actually not
+                * send a command complete event. To keep the flow
+                * control working, inject that event here.
+                */
+               if (opcode == 0xfc01)
+                       inject_cmd_complete(hu->hdev, opcode);
+       }
+
+       /* Prepend skb with frame type */
+       memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+
+       return skb;
+}
+
+static const struct hci_uart_proto intel_proto = {
+       .id             = HCI_UART_INTEL,
+       .name           = "Intel",
+       .init_speed     = 115200,
+       .open           = intel_open,
+       .close          = intel_close,
+       .flush          = intel_flush,
+       .setup          = intel_setup,
+       .recv           = intel_recv,
+       .enqueue        = intel_enqueue,
+       .dequeue        = intel_dequeue,
+};
+
+int __init intel_init(void)
+{
+       return hci_uart_register_proto(&intel_proto);
+}
+
+int __exit intel_deinit(void)
+{
+       return hci_uart_unregister_proto(&intel_proto);
+}
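
intel_setup() and intel_recv_event() in the new file coordinate through the generic bit-wait API: the setup path sets a flag bit and sleeps, the event handler clears the bit and wakes the sleeper. Stripped to its skeleton (same calls as above, error handling elided):

        /* waiter, in intel_setup() */
        set_bit(STATE_DOWNLOADING, &intel->flags);
        /* ... stream the firmware fragments ... */
        err = wait_on_bit_timeout(&intel->flags, STATE_DOWNLOADING,
                                  TASK_INTERRUPTIBLE, msecs_to_jiffies(5000));

        /* waker, in intel_recv_event(), on the download-complete event */
        if (test_and_clear_bit(STATE_DOWNLOADING, &intel->flags)) {
                smp_mb__after_atomic(); /* order the clear before the wakeup */
                wake_up_bit(&intel->flags, STATE_DOWNLOADING);
        }

The same pattern handles STATE_BOOTING with a one second timeout.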
index 177dd69fdd954151c3742c5cf443d439ad0edc0c..20c2ac193ff972a9ba8717092f285f81f9aaad59 100644 (file)
@@ -770,7 +770,7 @@ static int __init hci_uart_init(void)
 
        /* Register the tty discipline */
 
-       memset(&hci_uart_ldisc, 0, sizeof (hci_uart_ldisc));
+       memset(&hci_uart_ldisc, 0, sizeof(hci_uart_ldisc));
        hci_uart_ldisc.magic            = TTY_LDISC_MAGIC;
        hci_uart_ldisc.name             = "n_hci";
        hci_uart_ldisc.open             = hci_uart_tty_open;
@@ -804,6 +804,9 @@ static int __init hci_uart_init(void)
 #ifdef CONFIG_BT_HCIUART_3WIRE
        h5_init();
 #endif
+#ifdef CONFIG_BT_HCIUART_INTEL
+       intel_init();
+#endif
 #ifdef CONFIG_BT_HCIUART_BCM
        bcm_init();
 #endif
@@ -830,6 +833,9 @@ static void __exit hci_uart_exit(void)
 #ifdef CONFIG_BT_HCIUART_3WIRE
        h5_deinit();
 #endif
+#ifdef CONFIG_BT_HCIUART_INTEL
+       intel_deinit();
+#endif
 #ifdef CONFIG_BT_HCIUART_BCM
        bcm_deinit();
 #endif
index ce9c670956f54d414aadaec5f9810fac7e058abd..496587a73a9daa4a2a70ef92bd9fc04b0ef72dbf 100644 (file)
@@ -167,6 +167,11 @@ int h5_init(void);
 int h5_deinit(void);
 #endif
 
+#ifdef CONFIG_BT_HCIUART_INTEL
+int intel_init(void);
+int intel_deinit(void);
+#endif
+
 #ifdef CONFIG_BT_HCIUART_BCM
 int bcm_init(void);
 int bcm_deinit(void);
index 283f00a7f03629c12f8294e2aee67e1e598acf4d..1082d4bb016a9e4d7caf795a5128509780134916 100644 (file)
@@ -129,8 +129,9 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
 
        device_initialize(&chip->dev);
 
-       chip->cdev.owner = chip->pdev->driver->owner;
        cdev_init(&chip->cdev, &tpm_fops);
+       chip->cdev.owner = chip->pdev->driver->owner;
+       chip->cdev.kobj.parent = &chip->dev.kobj;
 
        return chip;
 }
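
The reordering above matters because cdev_init() zeroes the cdev before filling in its fields, so an owner assigned before the call was silently wiped. Setting cdev.kobj.parent is new: cdev_add() grabs a reference on that parent kobject, which keeps the chip's struct device alive for as long as the character device exists (that last point is a property of the generic cdev code, not shown in this hunk):

        cdev_init(&chip->cdev, &tpm_fops);              /* memsets the cdev */
        chip->cdev.owner = chip->pdev->driver->owner;   /* so assign afterwards */
        chip->cdev.kobj.parent = &chip->dev.kobj;       /* pinned by cdev_add() */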
index 44f9d20c19ac6e7724b66fae3329ae7111c8e61e..1267322595da0ec0a53bd10a8ba93531a036f483 100644 (file)
@@ -233,6 +233,14 @@ static int crb_acpi_add(struct acpi_device *device)
                return -ENODEV;
        }
 
+       /* At least some versions of AMI BIOS have a bug where the TPM2
+        * table has a zero address for the control area, so we must fail.
+        */
+       if (!buf->control_area_pa) {
+               dev_err(dev, "TPM2 ACPI table has a zero address for the control area\n");
+               return -EINVAL;
+       }
+
        if (buf->hdr.length < sizeof(struct acpi_tpm2)) {
                dev_err(dev, "TPM2 ACPI table has wrong size");
                return -EINVAL;
index bdfb4421c64365586b19b478c5288dd58c27f829..f271c350ef9404838fcaa6502db28cd1db5a744c 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index dffd4ce6c8b5513e8dd4fdadcc06a68cc3a209fe..58d678b5b40a76c6e426b41ed957e044197b2ae8 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 1afc18c4effcc150f6bda6dede54e134e9ab4d68..1a722e99e76e949352ab03120440ec3e312c365d 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 1b9b65bca51edbf65ba6ba62d39d7520a19f7453..5ebddc528145bb82827c2a6895b5467fb6dfaeca 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 628b6d5ed3d96a35b799d55e2467f7970ea5384d..157fe099ea6ad6b7fb8904d390de41b53c982489 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 931737677dfab553032f8a720893c719c4f0a639..9834944f08b1dafff2a24835a5b1e57b010e3e4a 100644 (file)
@@ -2,7 +2,7 @@
  * Clock framework definitions for SPEAr platform
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 4daa5977793ae058803c178581c0dbdc2409b2e9..222ce108b41a0e48afd4bf2d370a535d24511774 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr1310 machine clock framework source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 5a5c6648308dbc6f8009e6d5702dce3d95572532..973c9d3fbcf82285e2a6bf6412cb15fe602a892e 100644 (file)
@@ -4,7 +4,7 @@
  * SPEAr1340 machine clock framework source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index bb5f387774e2ce364b0e660c5d22271282b77084..404a55edd613d102f00bda3e127f7125b07fed88 100644 (file)
@@ -2,7 +2,7 @@
  * SPEAr3xx machines clock framework source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 4f649c9cb094e2c9019cc5528d64819a632e0489..231061fa73a4309b0744d2a8dda0d68bddeda5e0 100644 (file)
@@ -2,7 +2,7 @@
  * SPEAr6xx machines clock framework source file
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index b612411655f99309929ae760887c05a8608d01d6..7a3c30c4336f3cb9b7b67d5965b77175b4cad53f 100644 (file)
@@ -169,6 +169,15 @@ struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
 }
 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
 
+struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
+{
+       struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+
+       return policy && !policy_is_inactive(policy) ?
+               policy->freq_table : NULL;
+}
+EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
+
 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 {
        u64 idle_time;
@@ -993,7 +1002,7 @@ static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
        int ret = 0;
 
        /* Some related CPUs might not be present (physically hotplugged) */
-       for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
+       for_each_cpu(j, policy->real_cpus) {
                if (j == policy->kobj_cpu)
                        continue;
 
@@ -1010,7 +1019,7 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
        unsigned int j;
 
        /* Some related CPUs might not be present (physically hotplugged) */
-       for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
+       for_each_cpu(j, policy->real_cpus) {
                if (j == policy->kobj_cpu)
                        continue;
 
@@ -1132,6 +1141,7 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
 
                down_write(&policy->rwsem);
                policy->cpu = cpu;
+               policy->governor = NULL;
                up_write(&policy->rwsem);
        }
 
@@ -1153,11 +1163,14 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                goto err_free_cpumask;
 
+       if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
+               goto err_free_rcpumask;
+
        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
                                   "cpufreq");
        if (ret) {
                pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
-               goto err_free_rcpumask;
+               goto err_free_real_cpus;
        }
 
        INIT_LIST_HEAD(&policy->policy_list);
@@ -1174,6 +1187,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
 
        return policy;
 
+err_free_real_cpus:
+       free_cpumask_var(policy->real_cpus);
 err_free_rcpumask:
        free_cpumask_var(policy->related_cpus);
 err_free_cpumask:
@@ -1224,6 +1239,7 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
        cpufreq_policy_put_kobj(policy, notify);
+       free_cpumask_var(policy->real_cpus);
        free_cpumask_var(policy->related_cpus);
        free_cpumask_var(policy->cpus);
        kfree(policy);
@@ -1248,14 +1264,17 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 
        pr_debug("adding CPU %u\n", cpu);
 
-       /*
-        * Only possible if 'cpu' wasn't physically present earlier and we are
-        * here from subsys_interface add callback. A hotplug notifier will
-        * follow and we will handle it like logical CPU hotplug then. For now,
-        * just create the sysfs link.
-        */
-       if (cpu_is_offline(cpu))
-               return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu);
+       if (cpu_is_offline(cpu)) {
+               /*
+                * Only possible if we are here from the subsys_interface add
+                * callback.  A hotplug notifier will follow and we will handle
+                * it as CPU online then.  For now, just create the sysfs link,
+                * unless there is no policy or the link is already present.
+                */
+               policy = per_cpu(cpufreq_cpu_data, cpu);
+               return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
+                       ? add_cpu_dev_symlink(policy, cpu) : 0;
+       }
 
        if (!down_read_trylock(&cpufreq_rwsem))
                return 0;
@@ -1297,6 +1316,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
        /* related cpus should at least have policy->cpus */
        cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
 
+       /* Remember which CPUs have been present at the policy creation time. */
+       if (!recover_policy)
+               cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
+
        /*
         * affected cpus must always be the ones that are online. We aren't
         * managing offline cpus here.
@@ -1410,8 +1433,7 @@ nomem_out:
        return ret;
 }
 
-static int __cpufreq_remove_dev_prepare(struct device *dev,
-                                       struct subsys_interface *sif)
+static int __cpufreq_remove_dev_prepare(struct device *dev)
 {
        unsigned int cpu = dev->id;
        int ret = 0;
@@ -1427,10 +1449,8 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 
        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-               if (ret) {
+               if (ret)
                        pr_err("%s: Failed to stop governor\n", __func__);
-                       return ret;
-               }
        }
 
        down_write(&policy->rwsem);
@@ -1463,8 +1483,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
        return ret;
 }
 
-static int __cpufreq_remove_dev_finish(struct device *dev,
-                                      struct subsys_interface *sif)
+static int __cpufreq_remove_dev_finish(struct device *dev)
 {
        unsigned int cpu = dev->id;
        int ret;
@@ -1482,10 +1501,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
        /* If cpu is last user of policy, free policy */
        if (has_target()) {
                ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
-               if (ret) {
+               if (ret)
                        pr_err("%s: Failed to exit governor\n", __func__);
-                       return ret;
-               }
        }
 
        /*
@@ -1496,10 +1513,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);
 
-       /* Free the policy only if the driver is getting removed. */
-       if (sif)
-               cpufreq_policy_free(policy, true);
-
        return 0;
 }
 
@@ -1511,42 +1524,41 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 {
        unsigned int cpu = dev->id;
-       int ret;
-
-       /*
-        * Only possible if 'cpu' is getting physically removed now. A hotplug
-        * notifier should have already been called and we just need to remove
-        * link or free policy here.
-        */
-       if (cpu_is_offline(cpu)) {
-               struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
-               struct cpumask mask;
+       struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 
-               if (!policy)
-                       return 0;
+       if (!policy)
+               return 0;
 
-               cpumask_copy(&mask, policy->related_cpus);
-               cpumask_clear_cpu(cpu, &mask);
+       if (cpu_online(cpu)) {
+               __cpufreq_remove_dev_prepare(dev);
+               __cpufreq_remove_dev_finish(dev);
+       }
 
-               /*
-                * Free policy only if all policy->related_cpus are removed
-                * physically.
-                */
-               if (cpumask_intersects(&mask, cpu_present_mask)) {
-                       remove_cpu_dev_symlink(policy, cpu);
-                       return 0;
-               }
+       cpumask_clear_cpu(cpu, policy->real_cpus);
 
+       if (cpumask_empty(policy->real_cpus)) {
                cpufreq_policy_free(policy, true);
                return 0;
        }
 
-       ret = __cpufreq_remove_dev_prepare(dev, sif);
+       if (cpu != policy->kobj_cpu) {
+               remove_cpu_dev_symlink(policy, cpu);
+       } else {
+               /*
+                * The CPU owning the policy object is going away.  Move it to
+                * another suitable CPU.
+                */
+               unsigned int new_cpu = cpumask_first(policy->real_cpus);
+               struct device *new_dev = get_cpu_device(new_cpu);
 
-       if (!ret)
-               ret = __cpufreq_remove_dev_finish(dev, sif);
+               dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu);
 
-       return ret;
+               sysfs_remove_link(&new_dev->kobj, "cpufreq");
+               policy->kobj_cpu = new_cpu;
+               WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj));
+       }
+
+       return 0;
 }
 
 static void handle_update(struct work_struct *work)
@@ -2385,11 +2397,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
                        break;
 
                case CPU_DOWN_PREPARE:
-                       __cpufreq_remove_dev_prepare(dev, NULL);
+                       __cpufreq_remove_dev_prepare(dev);
                        break;
 
                case CPU_POST_DEAD:
-                       __cpufreq_remove_dev_finish(dev, NULL);
+                       __cpufreq_remove_dev_finish(dev);
                        break;
 
                case CPU_DOWN_FAILED:
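
The rewritten removal path above pivots on policy->real_cpus: each hot-removed CPU is cleared from the mask, the policy is freed only once the mask empties, and when the departing CPU owns the policy kobject the object is reparented to a surviving CPU rather than torn down. That migration step in isolation, sketched with hypothetical names around the calls visible in the diff:

    #include <linux/cpu.h>
    #include <linux/cpumask.h>
    #include <linux/kobject.h>
    #include <linux/sysfs.h>

    static void example_migrate_owner(struct kobject *kobj,
                                      unsigned int *owner_cpu,
                                      const struct cpumask *remaining)
    {
            unsigned int new_cpu = cpumask_first(remaining);
            struct device *new_dev = get_cpu_device(new_cpu);

            /* The new owner held a "cpufreq" symlink; replace it with
             * the real kobject by reparenting. */
            sysfs_remove_link(&new_dev->kobj, "cpufreq");
            *owner_cpu = new_cpu;
            WARN_ON(kobject_move(kobj, &new_dev->kobj));
    }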
index df14766a8e06b7b692807c08d1bf27d683a339eb..dfbbf981ed56fdfd52b147b5c693f764696a56c8 100644 (file)
@@ -297,15 +297,6 @@ int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
 }
 EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
 
-struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu);
-
-struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
-{
-       struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
-       return policy ? policy->freq_table : NULL;
-}
-EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
-
 MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
 MODULE_DESCRIPTION("CPUfreq frequency table helpers");
 MODULE_LICENSE("GPL");
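
Moving cpufreq_frequency_get_table() into cpufreq.c (first hunk of the previous file) lets it consult the per-CPU policy pointer directly and return NULL for inactive policies. Callers walk the returned table up to the CPUFREQ_TABLE_END sentinel, skipping CPUFREQ_ENTRY_INVALID rows; a sketch of typical use:

    #include <linux/cpufreq.h>

    static unsigned int example_max_freq(unsigned int cpu)
    {
            struct cpufreq_frequency_table *pos, *table;
            unsigned int max = 0;

            table = cpufreq_frequency_get_table(cpu);
            if (!table)
                    return 0;       /* no active policy for this CPU */

            for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) {
                    if (pos->frequency == CPUFREQ_ENTRY_INVALID)
                            continue;
                    if (pos->frequency > max)
                            max = pos->frequency;
            }
            return max;
    }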
index 15ada47bb720b710454795d8d7e83c235c2fccfc..fcb929ec5304a9b233f0d2a55be394e5c1ed25f1 100644 (file)
@@ -681,6 +681,7 @@ static struct cpu_defaults knl_params = {
                .get_max = core_get_max_pstate,
                .get_min = core_get_min_pstate,
                .get_turbo = knl_get_turbo_pstate,
+               .get_scaling = core_get_scaling,
                .set = core_set_pstate,
        },
 };
index e8e2775c3821e26dc179ea6e300025d0cf6e75e5..48b7228563ad7b024b17d49dc8ff9b675049f587 100644 (file)
@@ -112,7 +112,12 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
 static void enter_freeze_proper(struct cpuidle_driver *drv,
                                struct cpuidle_device *dev, int index)
 {
-       tick_freeze();
+       /*
+        * trace_suspend_resume() called by tick_freeze() for the last CPU
+        * executing it contains RCU usage regarded as invalid in the idle
+        * context, so tell RCU about that.
+        */
+       RCU_NONIDLE(tick_freeze());
        /*
         * The state used here cannot be a "coupled" one, because the "coupled"
         * cpuidle mechanism enables interrupts and doing that with timekeeping
@@ -122,7 +127,7 @@ static void enter_freeze_proper(struct cpuidle_driver *drv,
        WARN_ON(!irqs_disabled());
        /*
         * timekeeping_resume() that will be called by tick_unfreeze() for the
-        * last CPU executing it calls functions containing RCU read-side
+        * first CPU executing it calls functions containing RCU read-side
         * critical sections, so tell RCU about that.
         */
        RCU_NONIDLE(tick_unfreeze());
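
RCU_NONIDLE() is the standard escape hatch used twice in this hunk: it momentarily marks the CPU non-idle for RCU so that read-side critical sections inside the wrapped call (here, tracepoints reached via tick_freeze()/tick_unfreeze()) are legal from the idle path. The general shape, with a hypothetical callee:

    #include <linux/rcupdate.h>

    static void example_traced_work(void);

    static void example_idle_step(void)
    {
            /*
             * example_traced_work() stands in for any function containing
             * RCU read-side usage (tracepoints, rcu_read_lock() sections).
             * Called from the idle loop, it must be wrapped or RCU flags
             * the usage as invalid.
             */
            RCU_NONIDLE(example_traced_work());
    }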
index 1022c2e1a2b0adeac7e5954cd55363e2df7f7dab..cf1c87fa1edd557eb57f53dd41c11c02a440ea82 100644 (file)
@@ -1746,4 +1746,4 @@ EXPORT_SYMBOL_GPL(dw_dma_enable);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
index 4fd9961d552e8a0c12604f1cfef83f2e15bea66d..d4253742543841d6d261dfea22156259b0a13183 100644 (file)
@@ -305,10 +305,17 @@ const char *cper_mem_err_unpack(struct trace_seq *p,
        return ret;
 }
 
-static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem)
+static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
+       int len)
 {
        struct cper_mem_err_compact cmem;
 
+       /* Don't trust UEFI 2.1/2.2 structure with bad validation bits */
+       if (len == sizeof(struct cper_sec_mem_err_old) &&
+           (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) {
+               pr_err(FW_WARN "valid bits set for fields beyond structure\n");
+               return;
+       }
        if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
                printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
        if (mem->validation_bits & CPER_MEM_VALID_PA)
@@ -405,8 +412,10 @@ static void cper_estatus_print_section(
        } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
                struct cper_sec_mem_err *mem_err = (void *)(gdata + 1);
                printk("%s""section_type: memory error\n", newpfx);
-               if (gdata->error_data_length >= sizeof(*mem_err))
-                       cper_print_mem(newpfx, mem_err);
+               if (gdata->error_data_length >=
+                   sizeof(struct cper_sec_mem_err_old))
+                       cper_print_mem(newpfx, mem_err,
+                                      gdata->error_data_length);
                else
                        goto err_section_too_small;
        } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
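
The length check added above encodes a general CPER rule: a field may be consumed only when its validation bit is set, and a record using the shorter pre-UEFI-2.3 layout cannot legitimately validate fields it does not contain, so such bits indicate firmware corruption. A sketch combining both checks, reusing the masks from the hunk:

    #include <linux/cper.h>

    static bool example_mem_err_usable(const struct cper_sec_mem_err *mem,
                                       int len)
    {
            /* An old-layout record validating new-layout fields is corrupt. */
            if (len == sizeof(struct cper_sec_mem_err_old) &&
                (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1)))
                    return false;
            return true;
    }

    /* Per-field consumption stays gated on the matching bit, as above:
     *     if (mem->validation_bits & CPER_MEM_VALID_PA)
     *             use(mem->physical_addr);
     */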
index 7a3cb1fa0a766a55996f1ae13853905a95e20570..4630a8133ea6b94726a84c48486ff75eb4075bb2 100644 (file)
@@ -87,6 +87,15 @@ static int brcmstb_gpio_remove(struct platform_device *pdev)
        struct brcmstb_gpio_bank *bank;
        int ret = 0;
 
+       if (!priv) {
+               dev_err(&pdev->dev, "called %s without drvdata!\n", __func__);
+               return -EFAULT;
+       }
+
+       /*
+        * Return values below may be lost, but we report all errors, and it
+        * is more important to actually perform all of the removal steps.
+        */
        list_for_each(pos, &priv->bank_list) {
                bank = list_entry(pos, struct brcmstb_gpio_bank, node);
                ret = bgpio_remove(&bank->bgc);
@@ -143,6 +152,8 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
+       platform_set_drvdata(pdev, priv);
+       INIT_LIST_HEAD(&priv->bank_list);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        reg_base = devm_ioremap_resource(dev, res);
@@ -153,7 +164,6 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
        priv->reg_base = reg_base;
        priv->pdev = pdev;
 
-       INIT_LIST_HEAD(&priv->bank_list);
        if (brcmstb_gpio_sanity_check_banks(dev, np, res))
                return -EINVAL;
 
@@ -221,8 +231,6 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
        dev_info(dev, "Registered %d banks (GPIO(s): %d-%d)\n",
                        priv->num_banks, priv->gpio_base, gpio_base - 1);
 
-       platform_set_drvdata(pdev, priv);
-
        return 0;
 
 fail:
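
The probe reshuffle follows a common driver rule: publish drvdata and initialize any lists the remove path walks before any step that can fail, so remove() (which now rejects a missing drvdata outright) never observes half-built state. A minimal sketch with hypothetical names:

    #include <linux/list.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    struct example_priv {
            struct list_head bank_list;
    };

    static int example_probe(struct platform_device *pdev)
    {
            struct example_priv *priv;

            priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            /* Up front: the remove path relies on both unconditionally. */
            platform_set_drvdata(pdev, priv);
            INIT_LIST_HEAD(&priv->bank_list);

            /* ... fallible resource setup continues from here ... */
            return 0;
    }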
index c5e05c82d67c6c2cd8fdeac5996413e99455a3d1..c246ac3dda7ca2ba0cfa443b8e0175dc176d8da9 100644 (file)
@@ -578,15 +578,13 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
                writel_relaxed(~0, &g->clr_falling);
                writel_relaxed(~0, &g->clr_rising);
 
-               /* set up all irqs in this bank */
-               irq_set_chained_handler(bank_irq, gpio_irq_handler);
-
                /*
                 * Each chip handles 32 gpios, and each irq bank consists of 16
                 * gpio irqs. Pass the irq bank's corresponding controller to
                 * the chained irq handler.
                 */
-               irq_set_handler_data(bank_irq, &chips[gpio / 32]);
+               irq_set_chained_handler_and_data(bank_irq, gpio_irq_handler,
+                                                &chips[gpio / 32]);
 
                binten |= BIT(bank);
        }
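
irq_set_chained_handler_and_data() installs the flow handler and its data in one step, closing the window in the replaced two-call sequence where the chained handler could fire before its handler data was set. Sketched with hypothetical names (handler prototype assumed per this kernel era's irq_flow_handler_t):

    #include <linux/irq.h>

    struct example_chip;                    /* hypothetical driver state */
    static void example_irq_handler(unsigned int irq, struct irq_desc *desc);

    static void example_setup_bank_irq(unsigned int bank_irq,
                                       struct example_chip *chip)
    {
            /* Atomic replacement for the racy pair:
             *     irq_set_chained_handler(bank_irq, example_irq_handler);
             *     irq_set_handler_data(bank_irq, chip);
             */
            irq_set_chained_handler_and_data(bank_irq, example_irq_handler,
                                             chip);
    }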
index aed4ca9338bca1e15d8a3052438d68635661e01e..7d3c90e9da71a313fcbac511097f159c80b15132 100644 (file)
@@ -603,6 +603,7 @@ static int max732x_setup_gpio(struct max732x_chip *chip,
        gc->base = gpio_start;
        gc->ngpio = port;
        gc->label = chip->client->name;
+       gc->dev = &chip->client->dev;
        gc->owner = THIS_MODULE;
 
        return port;
index b0c57d505be75ac133455a287dc092ff83f3283c..61a731ff9a076fccd0c9b2bdc15d3ca10ce5e21c 100644 (file)
@@ -500,8 +500,10 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
 
        spin_lock_irqsave(&bank->lock, flags);
        retval = omap_set_gpio_triggering(bank, offset, type);
-       if (retval)
+       if (retval) {
+               spin_unlock_irqrestore(&bank->lock, flags);
                goto error;
+       }
        omap_gpio_init_irq(bank, offset);
        if (!omap_gpio_is_input(bank, offset)) {
                spin_unlock_irqrestore(&bank->lock, flags);
@@ -1185,6 +1187,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
        bank->irq = res->start;
        bank->dev = dev;
        bank->chip.dev = dev;
+       bank->chip.owner = THIS_MODULE;
        bank->dbck_flag = pdata->dbck_flag;
        bank->stride = pdata->bank_stride;
        bank->width = pdata->bank_width;
index d233eb3b81323342bb5a22122c1b1f570678c8b1..50caeb1ee3509da04b57d38dd332396e76ea25de 100644 (file)
@@ -570,6 +570,10 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
                                "could not connect irqchip to gpiochip\n");
                        return ret;
                }
+
+               gpiochip_set_chained_irqchip(&chip->gpio_chip,
+                                            &pca953x_irq_chip,
+                                            client->irq, NULL);
        }
 
        return 0;
index 77fe5d3cb105b97057aab4b900ba595a837f4e50..d5284dfe01fe167e9da6c6c4f8342d67d8380b20 100644 (file)
@@ -220,9 +220,9 @@ static void xgpio_save_regs(struct of_mm_gpio_chip *mm_gc)
        if (!chip->gpio_width[1])
                return;
 
-       xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET + XGPIO_TRI_OFFSET,
+       xgpio_writereg(mm_gc->regs + XGPIO_DATA_OFFSET + XGPIO_CHANNEL_OFFSET,
                       chip->gpio_state[1]);
-       xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET + XGPIO_TRI_OFFSET,
+       xgpio_writereg(mm_gc->regs + XGPIO_TRI_OFFSET + XGPIO_CHANNEL_OFFSET,
                       chip->gpio_dir[1]);
 }
 
index 2e87c4b8da26d5336164359a6a6e27f0a32de68c..a78882389836071510bbcd5548ce426e894868ab 100644 (file)
@@ -757,6 +757,7 @@ static int zynq_gpio_remove(struct platform_device *pdev)
        gpiochip_remove(&gpio->chip);
        clk_disable_unprepare(gpio->clk);
        device_set_wakeup_capable(&pdev->dev, 0);
+       pm_runtime_disable(&pdev->dev);
        return 0;
 }
 
index 01657830b470a49e8209fd39fa829d4a1fbb3610..31b00f91cfcd5a04848be288837d6d90c0110f44 100644 (file)
@@ -1614,6 +1614,9 @@ struct amdgpu_uvd {
 #define AMDGPU_MAX_VCE_HANDLES 16
 #define AMDGPU_VCE_FIRMWARE_OFFSET 256
 
+#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
+#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)
+
 struct amdgpu_vce {
        struct amdgpu_bo        *vcpu_bo;
        uint64_t                gpu_addr;
@@ -1626,6 +1629,7 @@ struct amdgpu_vce {
        const struct firmware   *fw;    /* VCE firmware */
        struct amdgpu_ring      ring[AMDGPU_MAX_VCE_RINGS];
        struct amdgpu_irq_src   irq;
+       unsigned                harvest_config;
 };
 
 /*
@@ -1862,6 +1866,12 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
 typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
 
+struct amdgpu_ip_block_status {
+       bool valid;
+       bool sw;
+       bool hw;
+};
+
 struct amdgpu_device {
        struct device                   *dev;
        struct drm_device               *ddev;
@@ -2004,7 +2014,7 @@ struct amdgpu_device {
 
        const struct amdgpu_ip_block_version *ip_blocks;
        int                             num_ip_blocks;
-       bool                            *ip_block_enabled;
+       struct amdgpu_ip_block_status   *ip_block_status;
        struct mutex    mn_lock;
        DECLARE_HASHTABLE(mn_hash, 7);
 
index d63135bf29c0c258f72025fa6f41f34677576ec4..1f040d85ac47fe336f609a0cf9d39ac59b94f665 100644 (file)
@@ -669,6 +669,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
                                  struct amdgpu_cs_parser *p)
 {
+       struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_ib *ib;
        int i, j, r;
 
@@ -694,6 +695,7 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
                for (j = 0; j < num_deps; ++j) {
                        struct amdgpu_fence *fence;
                        struct amdgpu_ring *ring;
+                       struct amdgpu_ctx *ctx;
 
                        r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
                                               deps[j].ip_instance,
@@ -701,14 +703,21 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
                        if (r)
                                return r;
 
+                       ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
+                       if (ctx == NULL)
+                               return -EINVAL;
+
                        r = amdgpu_fence_recreate(ring, p->filp,
                                                  deps[j].handle,
                                                  &fence);
-                       if (r)
+                       if (r) {
+                               amdgpu_ctx_put(ctx);
                                return r;
+                       }
 
                        amdgpu_sync_fence(&ib->sync, fence);
                        amdgpu_fence_unref(&fence);
+                       amdgpu_ctx_put(ctx);
                }
        }
 
@@ -808,12 +817,16 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 
        r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
                               wait->in.ring, &ring);
-       if (r)
+       if (r) {
+               amdgpu_ctx_put(ctx);
                return r;
+       }
 
        r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
-       if (r)
+       if (r) {
+               amdgpu_ctx_put(ctx);
                return r;
+       }
 
        r = fence_wait_timeout(&fence->base, true, timeout);
        amdgpu_fence_unref(&fence);
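
Both hunks in this file fix the same leak class: amdgpu_ctx_get() takes a reference that every early return after it must drop via amdgpu_ctx_put(). The canonical way to keep such paths balanced is a single unwind label, sketched here with hypothetical names:

    #include <linux/types.h>

    struct example_fpriv;
    struct example_ctx;
    static struct example_ctx *example_ctx_get(struct example_fpriv *f, u32 id);
    static void example_ctx_put(struct example_ctx *ctx);
    static int example_step(struct example_ctx *ctx);

    static int example_with_ctx(struct example_fpriv *fpriv, u32 ctx_id)
    {
            struct example_ctx *ctx;
            int r;

            ctx = example_ctx_get(fpriv, ctx_id);   /* takes a reference */
            if (!ctx)
                    return -EINVAL;

            r = example_step(ctx);
            if (r)
                    goto out_put;

            r = example_step(ctx);

    out_put:
            example_ctx_put(ctx);                   /* dropped on every path */
            return r;
    }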
index ba46be361c9b2c9f40bf0acb751e65b156d4b171..99f158e1baffa711073d1251d6d05c72cb68976e 100644 (file)
@@ -1191,8 +1191,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
                return -EINVAL;
        }
 
-       adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
-       if (adev->ip_block_enabled == NULL)
+       adev->ip_block_status = kcalloc(adev->num_ip_blocks,
+                                       sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
+       if (adev->ip_block_status == NULL)
                return -ENOMEM;
 
        if (adev->ip_blocks == NULL) {
@@ -1203,14 +1204,19 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
                        DRM_ERROR("disabled ip block: %d\n", i);
-                       adev->ip_block_enabled[i] = false;
+                       adev->ip_block_status[i].valid = false;
                } else {
                        if (adev->ip_blocks[i].funcs->early_init) {
                                r = adev->ip_blocks[i].funcs->early_init((void *)adev);
-                               if (r)
+                               if (r == -ENOENT)
+                                       adev->ip_block_status[i].valid = false;
+                               else if (r)
                                        return r;
+                               else
+                                       adev->ip_block_status[i].valid = true;
+                       } else {
+                               adev->ip_block_status[i].valid = true;
                        }
-                       adev->ip_block_enabled[i] = true;
                }
        }
 
@@ -1222,11 +1228,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
        int i, r;
 
        for (i = 0; i < adev->num_ip_blocks; i++) {
-               if (!adev->ip_block_enabled[i])
+               if (!adev->ip_block_status[i].valid)
                        continue;
                r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
                if (r)
                        return r;
+               adev->ip_block_status[i].sw = true;
                /* need to do gmc hw init early so we can allocate gpu mem */
                if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
                        r = amdgpu_vram_scratch_init(adev);
@@ -1238,11 +1245,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
                        r = amdgpu_wb_init(adev);
                        if (r)
                                return r;
+                       adev->ip_block_status[i].hw = true;
                }
        }
 
        for (i = 0; i < adev->num_ip_blocks; i++) {
-               if (!adev->ip_block_enabled[i])
+               if (!adev->ip_block_status[i].sw)
                        continue;
                /* gmc hw init is done early */
                if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
@@ -1250,6 +1258,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
                r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
                if (r)
                        return r;
+               adev->ip_block_status[i].hw = true;
        }
 
        return 0;
@@ -1260,7 +1269,7 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
        int i = 0, r;
 
        for (i = 0; i < adev->num_ip_blocks; i++) {
-               if (!adev->ip_block_enabled[i])
+               if (!adev->ip_block_status[i].valid)
                        continue;
                /* enable clockgating to save power */
                r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@@ -1282,7 +1291,7 @@ static int amdgpu_fini(struct amdgpu_device *adev)
        int i, r;
 
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-               if (!adev->ip_block_enabled[i])
+               if (!adev->ip_block_status[i].hw)
                        continue;
                if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
                        amdgpu_wb_fini(adev);
@@ -1295,14 +1304,16 @@ static int amdgpu_fini(struct amdgpu_device *adev)
                        return r;
                r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
                /* XXX handle errors */
+               adev->ip_block_status[i].hw = false;
        }
 
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-               if (!adev->ip_block_enabled[i])
+               if (!adev->ip_block_status[i].sw)
                        continue;
                r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
                /* XXX handle errors */
-               adev->ip_block_enabled[i] = false;
+               adev->ip_block_status[i].sw = false;
+               adev->ip_block_status[i].valid = false;
        }
 
        return 0;
@@ -1313,7 +1324,7 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
        int i, r;
 
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-               if (!adev->ip_block_enabled[i])
+               if (!adev->ip_block_status[i].valid)
                        continue;
                /* ungate blocks so that suspend can properly shut them down */
                r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@@ -1331,7 +1342,7 @@ static int amdgpu_resume(struct amdgpu_device *adev)
        int i, r;
 
        for (i = 0; i < adev->num_ip_blocks; i++) {
-               if (!adev->ip_block_enabled[i])
+               if (!adev->ip_block_status[i].valid)
                        continue;
                r = adev->ip_blocks[i].funcs->resume(adev);
                if (r)
@@ -1577,8 +1588,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
        amdgpu_fence_driver_fini(adev);
        amdgpu_fbdev_fini(adev);
        r = amdgpu_fini(adev);
-       kfree(adev->ip_block_enabled);
-       adev->ip_block_enabled = NULL;
+       kfree(adev->ip_block_status);
+       adev->ip_block_status = NULL;
        adev->accel_working = false;
        /* free i2c buses */
        amdgpu_i2c_fini(adev);
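
The amdgpu_ip_block_status conversion throughout this file lets teardown mirror exactly how far bring-up got: hw_fini runs only where hw init completed, sw_fini only where sw init completed, and early_init can now veto a block by returning -ENOENT (as the VCE change later in this series does for fully harvested engines). The staged pattern in miniature, with hypothetical names:

    #include <linux/types.h>

    struct example_block_status { bool valid, sw, hw; };

    static int example_sw_init(int i);
    static int example_hw_init(int i);
    static void example_sw_fini(int i);
    static void example_hw_fini(int i);

    static int example_init_blocks(struct example_block_status *st, int n)
    {
            int i, r;

            for (i = 0; i < n; i++) {
                    if (!st[i].valid)
                            continue;
                    r = example_sw_init(i);
                    if (r)
                            return r;       /* st[i].sw stays false */
                    st[i].sw = true;
                    r = example_hw_init(i);
                    if (r)
                            return r;       /* st[i].hw stays false */
                    st[i].hw = true;
            }
            return 0;
    }

    static void example_fini_blocks(struct example_block_status *st, int n)
    {
            int i;

            /* Reverse order, touching only what actually came up. */
            for (i = n - 1; i >= 0; i--) {
                    if (st[i].hw)
                            example_hw_fini(i);
                    if (st[i].sw)
                            example_sw_fini(i);
                    st[i].sw = st[i].hw = false;
            }
    }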
index ae43b58c9733a1962cbd6dae4ce42fb8c52fa81a..4afc507820c01db355600631e67f98a3e5d4a644 100644 (file)
@@ -449,7 +449,7 @@ out:
  * vital here, so they are not reported back to userspace.
  */
 static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
-                                   struct amdgpu_bo_va *bo_va)
+                                   struct amdgpu_bo_va *bo_va, uint32_t operation)
 {
        struct ttm_validate_buffer tv, *entry;
        struct amdgpu_bo_list_entry *vm_bos;
@@ -485,7 +485,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
        if (r)
                goto error_unlock;
 
-       r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
+
+       if (operation == AMDGPU_VA_OP_MAP)
+               r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
 
 error_unlock:
        mutex_unlock(&bo_va->vm->mutex);
@@ -580,7 +582,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
        }
 
        if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
-               amdgpu_gem_va_update_vm(adev, bo_va);
+               amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
 
        drm_gem_object_unreference_unlocked(gobj);
        return r;
index 52dff75aac6f3e5c3ac04252ec19c122bace0558..bc0fac618a3f01121edb740207b9866b6818f71f 100644 (file)
@@ -180,16 +180,16 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
        if (vm) {
                /* do context switch */
                amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);
-       }
 
-       if (vm && ring->funcs->emit_gds_switch)
-               amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
-                                           ib->gds_base, ib->gds_size,
-                                           ib->gws_base, ib->gws_size,
-                                           ib->oa_base, ib->oa_size);
+               if (ring->funcs->emit_gds_switch)
+                       amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
+                                                   ib->gds_base, ib->gds_size,
+                                                   ib->gws_base, ib->gws_size,
+                                                   ib->oa_base, ib->oa_size);
 
-       if (ring->funcs->emit_hdp_flush)
-               amdgpu_ring_emit_hdp_flush(ring);
+               if (ring->funcs->emit_hdp_flush)
+                       amdgpu_ring_emit_hdp_flush(ring);
+       }
 
        old_ctx = ring->current_ctx;
        for (i = 0; i < num_ibs; ++i) {
index 5533434c7a8fad8dd22bab138c84236befe13167..9736892bcdf932c328a883473a6c3e23d560cd38 100644 (file)
@@ -235,7 +235,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 
                for (i = 0; i < adev->num_ip_blocks; i++) {
                        if (adev->ip_blocks[i].type == type &&
-                           adev->ip_block_enabled[i]) {
+                           adev->ip_block_status[i].valid) {
                                ip.hw_ip_version_major = adev->ip_blocks[i].major;
                                ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
                                ip.capabilities_flags = 0;
@@ -274,7 +274,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 
                for (i = 0; i < adev->num_ip_blocks; i++)
                        if (adev->ip_blocks[i].type == type &&
-                           adev->ip_block_enabled[i] &&
+                           adev->ip_block_status[i].valid &&
                            count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
                                count++;
 
@@ -416,7 +416,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                return n ? -EFAULT : 0;
        }
        case AMDGPU_INFO_DEV_INFO: {
-               struct drm_amdgpu_info_device dev_info;
+               struct drm_amdgpu_info_device dev_info = {};
                struct amdgpu_cu_info cu_info;
 
                dev_info.device_id = dev->pdev->device;
@@ -459,6 +459,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap));
                dev_info.vram_type = adev->mc.vram_type;
                dev_info.vram_bit_width = adev->mc.vram_width;
+               dev_info.vce_harvest_config = adev->vce.harvest_config;
 
                return copy_to_user(out, &dev_info,
                                    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
index f75a31df30bdb704f93e5dd465a3a74d93b524d8..ace870afc7d45154a6bb6013cb45b773a8e63512 100644 (file)
@@ -494,29 +494,67 @@ static void cz_dpm_fini(struct amdgpu_device *adev)
        amdgpu_free_extended_power_table(adev);
 }
 
+#define ixSMUSVI_NB_CURRENTVID 0xD8230044
+#define CURRENT_NB_VID_MASK 0xff000000
+#define CURRENT_NB_VID__SHIFT 24
+#define ixSMUSVI_GFX_CURRENTVID  0xD8230048
+#define CURRENT_GFX_VID_MASK 0xff000000
+#define CURRENT_GFX_VID__SHIFT 24
+
 static void
 cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
                                               struct seq_file *m)
 {
+       struct cz_power_info *pi = cz_get_pi(adev);
        struct amdgpu_clock_voltage_dependency_table *table =
                &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
-       u32 current_index =
-               (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
-               TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
-               TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
-       u32 sclk, tmp;
-       u16 vddc;
-
-       if (current_index >= NUM_SCLK_LEVELS) {
-               seq_printf(m, "invalid dpm profile %d\n", current_index);
+       struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
+               &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+       struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
+               &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+       u32 sclk_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX),
+                                      TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
+       u32 uvd_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+                                     TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
+       u32 vce_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+                                     TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
+       u32 sclk, vclk, dclk, ecclk, tmp;
+       u16 vddnb, vddgfx;
+
+       if (sclk_index >= NUM_SCLK_LEVELS) {
+               seq_printf(m, "invalid sclk dpm profile %d\n", sclk_index);
        } else {
-               sclk = table->entries[current_index].clk;
-               tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
-                       SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
-                       SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
-               vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
-               seq_printf(m, "power level %d    sclk: %u vddc: %u\n",
-                          current_index, sclk, vddc);
+               sclk = table->entries[sclk_index].clk;
+               seq_printf(m, "%u sclk: %u\n", sclk_index, sclk);
+       }
+
+       tmp = (RREG32_SMC(ixSMUSVI_NB_CURRENTVID) &
+              CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
+       vddnb = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
+       tmp = (RREG32_SMC(ixSMUSVI_GFX_CURRENTVID) &
+              CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
+       vddgfx = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
+       seq_printf(m, "vddnb: %u vddgfx: %u\n", vddnb, vddgfx);
+
+       seq_printf(m, "uvd    %sabled\n", pi->uvd_power_gated ? "dis" : "en");
+       if (!pi->uvd_power_gated) {
+               if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+                       seq_printf(m, "invalid uvd dpm level %d\n", uvd_index);
+               } else {
+                       vclk = uvd_table->entries[uvd_index].vclk;
+                       dclk = uvd_table->entries[uvd_index].dclk;
+                       seq_printf(m, "%u uvd vclk: %u dclk: %u\n", uvd_index, vclk, dclk);
+               }
+       }
+
+       seq_printf(m, "vce    %sabled\n", pi->vce_power_gated ? "dis" : "en");
+       if (!pi->vce_power_gated) {
+               if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+                       seq_printf(m, "invalid vce dpm level %d\n", vce_index);
+               } else {
+                       ecclk = vce_table->entries[vce_index].ecclk;
+                       seq_printf(m, "%u vce ecclk: %u\n", vce_index, ecclk);
+               }
        }
 }
 
@@ -1679,25 +1717,31 @@ static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev)
        if (ret)
                return ret;
 
-       DRM_INFO("DPM unforce state min=%d, max=%d.\n",
-                       pi->sclk_dpm.soft_min_clk,
-                       pi->sclk_dpm.soft_max_clk);
+       DRM_DEBUG("DPM unforce state min=%d, max=%d.\n",
+                 pi->sclk_dpm.soft_min_clk,
+                 pi->sclk_dpm.soft_max_clk);
 
        return 0;
 }
 
 static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
-                               enum amdgpu_dpm_forced_level level)
+                                 enum amdgpu_dpm_forced_level level)
 {
        int ret = 0;
 
        switch (level) {
        case AMDGPU_DPM_FORCED_LEVEL_HIGH:
+               ret = cz_dpm_unforce_dpm_levels(adev);
+               if (ret)
+                       return ret;
                ret = cz_dpm_force_highest(adev);
                if (ret)
                        return ret;
                break;
        case AMDGPU_DPM_FORCED_LEVEL_LOW:
+               ret = cz_dpm_unforce_dpm_levels(adev);
+               if (ret)
+                       return ret;
                ret = cz_dpm_force_lowest(adev);
                if (ret)
                        return ret;
@@ -1711,6 +1755,8 @@ static int cz_dpm_force_dpm_level(struct amdgpu_device *adev,
                break;
        }
 
+       adev->pm.dpm.forced_level = level;
+
        return ret;
 }
 
index 6e77964f1b640d6841ca216ab2c7f5eed73077f5..e70a26f587a03f7dde6788f88add8f32a25ad764 100644 (file)
@@ -2632,6 +2632,7 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       unsigned type;
 
        switch (mode) {
        case DRM_MODE_DPMS_ON:
@@ -2640,6 +2641,9 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                dce_v10_0_vga_enable(crtc, true);
                amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
                dce_v10_0_vga_enable(crtc, false);
+               /* Make sure VBLANK interrupt is still enabled */
+               type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+               amdgpu_irq_update(adev, &adev->crtc_irq, type);
                drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
                dce_v10_0_crtc_load_lut(crtc);
                break;
index 7f7abb0e0be53026d98e074d4c0a756484eb3e1b..dcb402ee048a602ecf03aade115a872212781811 100644 (file)
@@ -2631,6 +2631,7 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       unsigned type;
 
        switch (mode) {
        case DRM_MODE_DPMS_ON:
@@ -2639,6 +2640,9 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                dce_v11_0_vga_enable(crtc, true);
                amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
                dce_v11_0_vga_enable(crtc, false);
+               /* Make sure VBLANK interrupt is still enabled */
+               type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+               amdgpu_irq_update(adev, &adev->crtc_irq, type);
                drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
                dce_v11_0_crtc_load_lut(crtc);
                break;
index 08387dfd98a7f008cb67dc98160bfd6eaaedadc1..cc050a329c496e66d44e481662acf4252e5e56ef 100644 (file)
@@ -2566,6 +2566,7 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
        struct drm_device *dev = crtc->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       unsigned type;
 
        switch (mode) {
        case DRM_MODE_DPMS_ON:
@@ -2574,6 +2575,9 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
                dce_v8_0_vga_enable(crtc, true);
                amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
                dce_v8_0_vga_enable(crtc, false);
+               /* Make sure VBLANK interrupt is still enabled */
+               type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+               amdgpu_irq_update(adev, &adev->crtc_irq, type);
                drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
                dce_v8_0_crtc_load_lut(crtc);
                break;
index 2c188fb9fd22ff1a3528673beb8866639d5ef631..2db6ab0a543dada20b64d3d5d89eb4d5f36a1873 100644 (file)
@@ -2561,7 +2561,7 @@ static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
  * scheduling on the ring.  This function schedules the IB
  * on the gfx ring for execution by the GPU.
  */
-static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
+static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib)
 {
        bool need_ctx_switch = ring->current_ctx != ib->ctx;
@@ -2569,15 +2569,10 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
        u32 next_rptr = ring->wptr + 5;
 
        /* drop the CE preamble IB for the same context */
-       if ((ring->type == AMDGPU_RING_TYPE_GFX) &&
-           (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
-           !need_ctx_switch)
+       if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
                return;
 
-       if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
-               control |= INDIRECT_BUFFER_VALID;
-
-       if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
+       if (need_ctx_switch)
                next_rptr += 2;
 
        next_rptr += 4;
@@ -2588,7 +2583,7 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, next_rptr);
 
        /* insert SWITCH_BUFFER packet before first IB in the ring frame */
-       if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
+       if (need_ctx_switch) {
                amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
                amdgpu_ring_write(ring, 0);
        }
@@ -2611,6 +2606,35 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, control);
 }
 
+static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+                                 struct amdgpu_ib *ib)
+{
+       u32 header, control = 0;
+       u32 next_rptr = ring->wptr + 5;
+
+       control |= INDIRECT_BUFFER_VALID;
+       next_rptr += 4;
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
+       amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+       amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+       amdgpu_ring_write(ring, next_rptr);
+
+       header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+       control |= ib->length_dw |
+                          (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+
+       amdgpu_ring_write(ring, header);
+       amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+                                         (2 << 0) |
+#endif
+                                         (ib->gpu_addr & 0xFFFFFFFC));
+       amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+       amdgpu_ring_write(ring, control);
+}
+
 /**
  * gfx_v7_0_ring_test_ib - basic ring IB test
  *
@@ -5555,7 +5579,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
        .get_wptr = gfx_v7_0_ring_get_wptr_gfx,
        .set_wptr = gfx_v7_0_ring_set_wptr_gfx,
        .parse_cs = NULL,
-       .emit_ib = gfx_v7_0_ring_emit_ib,
+       .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
        .emit_fence = gfx_v7_0_ring_emit_fence_gfx,
        .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
        .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
@@ -5571,7 +5595,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
        .get_wptr = gfx_v7_0_ring_get_wptr_compute,
        .set_wptr = gfx_v7_0_ring_set_wptr_compute,
        .parse_cs = NULL,
-       .emit_ib = gfx_v7_0_ring_emit_ib,
+       .emit_ib = gfx_v7_0_ring_emit_ib_compute,
        .emit_fence = gfx_v7_0_ring_emit_fence_compute,
        .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
        .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
index 7b683fb2173c728fff760c926f1204b3897b4eae..9e1d4ddbf475027e10c6e0d6d77a63efb4eec3b3 100644 (file)
@@ -1813,10 +1813,7 @@ static u32 gfx_v8_0_get_rb_disabled(struct amdgpu_device *adev,
        u32 data, mask;
 
        data = RREG32(mmCC_RB_BACKEND_DISABLE);
-       if (data & 1)
-               data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
-       else
-               data = 0;
+       data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
 
        data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);
 
@@ -3756,7 +3753,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, 0x20); /* poll interval */
 }
 
-static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
+static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib)
 {
        bool need_ctx_switch = ring->current_ctx != ib->ctx;
@@ -3764,15 +3761,10 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
        u32 next_rptr = ring->wptr + 5;
 
        /* drop the CE preamble IB for the same context */
-       if ((ring->type == AMDGPU_RING_TYPE_GFX) &&
-           (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
-           !need_ctx_switch)
+       if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
                return;
 
-       if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
-               control |= INDIRECT_BUFFER_VALID;
-
-       if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
+       if (need_ctx_switch)
                next_rptr += 2;
 
        next_rptr += 4;
@@ -3783,7 +3775,7 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, next_rptr);
 
        /* insert SWITCH_BUFFER packet before first IB in the ring frame */
-       if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
+       if (need_ctx_switch) {
                amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
                amdgpu_ring_write(ring, 0);
        }
@@ -3806,6 +3798,36 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
        amdgpu_ring_write(ring, control);
 }
 
+static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+                                 struct amdgpu_ib *ib)
+{
+       u32 header, control = 0;
+       u32 next_rptr = ring->wptr + 5;
+
+       control |= INDIRECT_BUFFER_VALID;
+
+       next_rptr += 4;
+       amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+       amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
+       amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+       amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+       amdgpu_ring_write(ring, next_rptr);
+
+       header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
+
+       control |= ib->length_dw |
+                          (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
+
+       amdgpu_ring_write(ring, header);
+       amdgpu_ring_write(ring,
+#ifdef __BIG_ENDIAN
+                                         (2 << 0) |
+#endif
+                                         (ib->gpu_addr & 0xFFFFFFFC));
+       amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
+       amdgpu_ring_write(ring, control);
+}
+
 static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
                                         u64 seq, unsigned flags)
 {
@@ -4227,7 +4249,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
        .get_wptr = gfx_v8_0_ring_get_wptr_gfx,
        .set_wptr = gfx_v8_0_ring_set_wptr_gfx,
        .parse_cs = NULL,
-       .emit_ib = gfx_v8_0_ring_emit_ib,
+       .emit_ib = gfx_v8_0_ring_emit_ib_gfx,
        .emit_fence = gfx_v8_0_ring_emit_fence_gfx,
        .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
        .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
@@ -4243,7 +4265,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
        .get_wptr = gfx_v8_0_ring_get_wptr_compute,
        .set_wptr = gfx_v8_0_ring_set_wptr_compute,
        .parse_cs = NULL,
-       .emit_ib = gfx_v8_0_ring_emit_ib,
+       .emit_ib = gfx_v8_0_ring_emit_ib_compute,
        .emit_fence = gfx_v8_0_ring_emit_fence_compute,
        .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
        .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
index d62c4002e39cc7acc5a3427d0aed92bedf12c984..d1064ca3670ec31376293950c76452414e94ff23 100644 (file)
@@ -35,6 +35,8 @@
 #include "oss/oss_2_0_d.h"
 #include "oss/oss_2_0_sh_mask.h"
 #include "gca/gfx_8_0_d.h"
+#include "smu/smu_7_1_2_d.h"
+#include "smu/smu_7_1_2_sh_mask.h"
 
 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT    0x04
 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK      0x10
@@ -112,6 +114,10 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 
        mutex_lock(&adev->grbm_idx_mutex);
        for (idx = 0; idx < 2; ++idx) {
+
+               if (adev->vce.harvest_config & (1 << idx))
+                       continue;
+
                if(idx == 0)
                        WREG32_P(mmGRBM_GFX_INDEX, 0,
                                ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
@@ -190,10 +196,52 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
        return 0;
 }
 
+#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS     0xC0014074
+#define VCE_HARVEST_FUSE_MACRO__SHIFT       27
+#define VCE_HARVEST_FUSE_MACRO__MASK        0x18000000
+
+static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
+{
+       u32 tmp;
+       unsigned ret;
+
+       if (adev->flags & AMDGPU_IS_APU)
+               tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
+                      VCE_HARVEST_FUSE_MACRO__MASK) >>
+                       VCE_HARVEST_FUSE_MACRO__SHIFT;
+       else
+               tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
+                      CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
+                       CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;
+
+       switch (tmp) {
+       case 1:
+               ret = AMDGPU_VCE_HARVEST_VCE0;
+               break;
+       case 2:
+               ret = AMDGPU_VCE_HARVEST_VCE1;
+               break;
+       case 3:
+               ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
+               break;
+       default:
+               ret = 0;
+       }
+
+       return ret;
+}
+
 static int vce_v3_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
+
+       if ((adev->vce.harvest_config &
+            (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
+           (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
+               return -ENOENT;
+
        vce_v3_0_set_ring_funcs(adev);
        vce_v3_0_set_irq_funcs(adev);
 
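vce_v3_0_get_harvest_config() reads a 2-bit fuse field to learn which VCE instances were fused off, and early_init bails out with -ENOENT when both are gone. A standalone sketch of the decode, with the register value simulated rather than read from hardware:

#include <stdio.h>

#define VCE_HARVEST_SHIFT 27
#define VCE_HARVEST_MASK  0x18000000u
#define HARVEST_VCE0 (1u << 0)
#define HARVEST_VCE1 (1u << 1)

static unsigned decode_harvest(unsigned fuse)
{
        switch ((fuse & VCE_HARVEST_MASK) >> VCE_HARVEST_SHIFT) {
        case 1:  return HARVEST_VCE0;
        case 2:  return HARVEST_VCE1;
        case 3:  return HARVEST_VCE0 | HARVEST_VCE1;
        default: return 0;      /* nothing harvested */
        }
}

int main(void)
{
        unsigned fuse = 3u << VCE_HARVEST_SHIFT;  /* both instances fused off */
        unsigned cfg = decode_harvest(fuse);

        /* early_init above returns -ENOENT in this case: no usable VCE */
        printf("harvest config: %#x -> %s\n", cfg,
               cfg == (HARVEST_VCE0 | HARVEST_VCE1) ? "disable VCE block" : "ok");
        return 0;
}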
index fa5a4448531dfe9dd307d88b55e051761821a28d..68552da4028740167ddb20289c274bc332935172 100644 (file)
@@ -122,6 +122,32 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
        spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
 }
 
+/* smu_8_0_d.h */
+#define mmMP0PUB_IND_INDEX                                                      0x180
+#define mmMP0PUB_IND_DATA                                                       0x181
+
+static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
+{
+       unsigned long flags;
+       u32 r;
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       WREG32(mmMP0PUB_IND_INDEX, (reg));
+       r = RREG32(mmMP0PUB_IND_DATA);
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+       return r;
+}
+
+static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&adev->smc_idx_lock, flags);
+       WREG32(mmMP0PUB_IND_INDEX, (reg));
+       WREG32(mmMP0PUB_IND_DATA, (v));
+       spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
+}
+
 static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
 {
        unsigned long flags;
@@ -1222,8 +1248,13 @@ static int vi_common_early_init(void *handle)
        bool smc_enabled = false;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       adev->smc_rreg = &vi_smc_rreg;
-       adev->smc_wreg = &vi_smc_wreg;
+       if (adev->flags & AMDGPU_IS_APU) {
+               adev->smc_rreg = &cz_smc_rreg;
+               adev->smc_wreg = &cz_smc_wreg;
+       } else {
+               adev->smc_rreg = &vi_smc_rreg;
+               adev->smc_wreg = &vi_smc_wreg;
+       }
        adev->pcie_rreg = &vi_pcie_rreg;
        adev->pcie_wreg = &vi_pcie_wreg;
        adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
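cz_smc_rreg()/cz_smc_wreg() are a textbook index/data pair: one MMIO write selects the internal SMC address, the following access moves the data, and the spinlock keeps the two-step sequence atomic. A standalone sketch of the pattern over a simulated register space (locking reduced to comments):

#include <stdint.h>
#include <stdio.h>

static uint32_t smc_space[256];   /* simulated indirect register space */
static uint32_t ind_index;        /* stands in for mmMP0PUB_IND_INDEX */

static void wreg_index(uint32_t reg) { ind_index = reg; }
static uint32_t rreg_data(void)      { return smc_space[ind_index]; }
static void wreg_data(uint32_t v)    { smc_space[ind_index] = v; }

static uint32_t smc_rreg(uint32_t reg)
{
        /* spin_lock_irqsave(&smc_idx_lock, flags); */
        wreg_index(reg);
        uint32_t r = rreg_data();
        /* spin_unlock_irqrestore(&smc_idx_lock, flags); */
        return r;
}

static void smc_wreg(uint32_t reg, uint32_t v)
{
        /* spin_lock_irqsave(&smc_idx_lock, flags); */
        wreg_index(reg);
        wreg_data(v);
        /* spin_unlock_irqrestore(&smc_idx_lock, flags); */
}

int main(void)
{
        smc_wreg(0x42, 0xdeadbeef);
        printf("reg 0x42 = %#x\n", smc_rreg(0x42));
        return 0;
}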
index 42d2ffa087169fd110da85e22a496aa080d34c9a..01ffe9bffe38a9e93d49a811a4803f49866a0861 100644 (file)
@@ -531,8 +531,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
 
        drm_crtc_vblank_off(crtc);
 
-       crtc->mode = *adj;
-
        val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA;
        if (val != dcrtc->dumb_ctrl) {
                dcrtc->dumb_ctrl = val;
index 580e10acaa3ace493c16305eabecd75394c6e49a..60a688ef81c71cf1174c081426c10c80e9453bdb 100644 (file)
@@ -69,8 +69,9 @@ void armada_gem_free_object(struct drm_gem_object *obj)
 
        if (dobj->obj.import_attach) {
                /* We only ever display imported data */
-               dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt,
-                                        DMA_TO_DEVICE);
+               if (dobj->sgt)
+                       dma_buf_unmap_attachment(dobj->obj.import_attach,
+                                                dobj->sgt, DMA_TO_DEVICE);
                drm_prime_gem_destroy(&dobj->obj, NULL);
        }
 
index c5b06fdb459c4560c46dd68a9c52314f87c319b0..e939faba7fcca8b0ff737008de124d3b7f96b3a5 100644 (file)
@@ -7,6 +7,7 @@
  * published by the Free Software Foundation.
  */
 #include <drm/drmP.h>
+#include <drm/drm_plane_helper.h>
 #include "armada_crtc.h"
 #include "armada_drm.h"
 #include "armada_fb.h"
@@ -85,16 +86,8 @@ static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data)
 
        if (fb)
                armada_drm_queue_unref_work(dcrtc->crtc.dev, fb);
-}
 
-static unsigned armada_limit(int start, unsigned size, unsigned max)
-{
-       int end = start + size;
-       if (end < 0)
-               return 0;
-       if (start < 0)
-               start = 0;
-       return (unsigned)end > max ? max - start : end - start;
+       wake_up(&dplane->vbl.wait);
 }
 
 static int
@@ -105,26 +98,39 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
 {
        struct armada_plane *dplane = drm_to_armada_plane(plane);
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+       struct drm_rect src = {
+               .x1 = src_x,
+               .y1 = src_y,
+               .x2 = src_x + src_w,
+               .y2 = src_y + src_h,
+       };
+       struct drm_rect dest = {
+               .x1 = crtc_x,
+               .y1 = crtc_y,
+               .x2 = crtc_x + crtc_w,
+               .y2 = crtc_y + crtc_h,
+       };
+       const struct drm_rect clip = {
+               .x2 = crtc->mode.hdisplay,
+               .y2 = crtc->mode.vdisplay,
+       };
        uint32_t val, ctrl0;
        unsigned idx = 0;
+       bool visible;
        int ret;
 
-       crtc_w = armada_limit(crtc_x, crtc_w, dcrtc->crtc.mode.hdisplay);
-       crtc_h = armada_limit(crtc_y, crtc_h, dcrtc->crtc.mode.vdisplay);
+       ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
+                                           0, INT_MAX, true, false, &visible);
+       if (ret)
+               return ret;
+
        ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) |
                CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) |
                CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA;
 
        /* Does the position/size result in nothing to display? */
-       if (crtc_w == 0 || crtc_h == 0) {
+       if (!visible)
                ctrl0 &= ~CFG_DMA_ENA;
-       }
-
-       /*
-        * FIXME: if the starting point is off screen, we need to
-        * adjust src_x, src_y, src_w, src_h appropriately, and
-        * according to the scale.
-        */
 
        if (!dcrtc->plane) {
                dcrtc->plane = plane;
@@ -134,15 +140,19 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
        /* FIXME: overlay on an interlaced display */
        /* Just updating the position/size? */
        if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
-               val = (src_h & 0xffff0000) | src_w >> 16;
+               val = (drm_rect_height(&src) & 0xffff0000) |
+                     drm_rect_width(&src) >> 16;
                dplane->src_hw = val;
                writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
-               val = crtc_h << 16 | crtc_w;
+
+               val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
                dplane->dst_hw = val;
                writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
-               val = crtc_y << 16 | crtc_x;
+
+               val = dest.y1 << 16 | dest.x1;
                dplane->dst_yx = val;
                writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
+
                return 0;
        } else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
                /* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
@@ -150,15 +160,14 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
                               dcrtc->base + LCD_SPU_SRAM_PARA1);
        }
 
-       ret = wait_event_timeout(dplane->vbl.wait,
-                                list_empty(&dplane->vbl.update.node),
-                                HZ/25);
-       if (ret < 0)
-               return ret;
+       wait_event_timeout(dplane->vbl.wait,
+                          list_empty(&dplane->vbl.update.node),
+                          HZ/25);
 
        if (plane->fb != fb) {
                struct armada_gem_object *obj = drm_fb_obj(fb);
-               uint32_t sy, su, sv;
+               uint32_t addr[3], pixel_format;
+               int i, num_planes, hsub;
 
                /*
                 * Take a reference on the new framebuffer - we want to
@@ -178,26 +187,39 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
                                                            older_fb);
                }
 
-               src_y >>= 16;
-               src_x >>= 16;
-               sy = obj->dev_addr + fb->offsets[0] + src_y * fb->pitches[0] +
-                       src_x * fb->bits_per_pixel / 8;
-               su = obj->dev_addr + fb->offsets[1] + src_y * fb->pitches[1] +
-                       src_x;
-               sv = obj->dev_addr + fb->offsets[2] + src_y * fb->pitches[2] +
-                       src_x;
+               src_y = src.y1 >> 16;
+               src_x = src.x1 >> 16;
 
-               armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+               pixel_format = fb->pixel_format;
+               hsub = drm_format_horz_chroma_subsampling(pixel_format);
+               num_planes = drm_format_num_planes(pixel_format);
+
+               /*
+                * Annoyingly, shifting a YUYV-format image by one pixel
+                * causes the U/V planes to toggle.  Toggle the UV swap.
+                * (Unfortunately, this causes momentary colour flickering.)
+                */
+               if (src_x & (hsub - 1) && num_planes == 1)
+                       ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
+
+               for (i = 0; i < num_planes; i++)
+                       addr[i] = obj->dev_addr + fb->offsets[i] +
+                                 src_y * fb->pitches[i] +
+                                 src_x * drm_format_plane_cpp(pixel_format, i);
+               for (; i < ARRAY_SIZE(addr); i++)
+                       addr[i] = 0;
+
+               armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
                                     LCD_SPU_DMA_START_ADDR_Y0);
-               armada_reg_queue_set(dplane->vbl.regs, idx, su,
+               armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
                                     LCD_SPU_DMA_START_ADDR_U0);
-               armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+               armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
                                     LCD_SPU_DMA_START_ADDR_V0);
-               armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+               armada_reg_queue_set(dplane->vbl.regs, idx, addr[0],
                                     LCD_SPU_DMA_START_ADDR_Y1);
-               armada_reg_queue_set(dplane->vbl.regs, idx, su,
+               armada_reg_queue_set(dplane->vbl.regs, idx, addr[1],
                                     LCD_SPU_DMA_START_ADDR_U1);
-               armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+               armada_reg_queue_set(dplane->vbl.regs, idx, addr[2],
                                     LCD_SPU_DMA_START_ADDR_V1);
 
                val = fb->pitches[0] << 16 | fb->pitches[0];
@@ -208,24 +230,27 @@ armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
                                     LCD_SPU_DMA_PITCH_UV);
        }
 
-       val = (src_h & 0xffff0000) | src_w >> 16;
+       val = (drm_rect_height(&src) & 0xffff0000) | drm_rect_width(&src) >> 16;
        if (dplane->src_hw != val) {
                dplane->src_hw = val;
                armada_reg_queue_set(dplane->vbl.regs, idx, val,
                                     LCD_SPU_DMA_HPXL_VLN);
        }
-       val = crtc_h << 16 | crtc_w;
+
+       val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
        if (dplane->dst_hw != val) {
                dplane->dst_hw = val;
                armada_reg_queue_set(dplane->vbl.regs, idx, val,
                                     LCD_SPU_DZM_HPXL_VLN);
        }
-       val = crtc_y << 16 | crtc_x;
+
+       val = dest.y1 << 16 | dest.x1;
        if (dplane->dst_yx != val) {
                dplane->dst_yx = val;
                armada_reg_queue_set(dplane->vbl.regs, idx, val,
                                     LCD_SPU_DMA_OVSA_HPXL_VLN);
        }
+
        if (dplane->ctrl0 != ctrl0) {
                dplane->ctrl0 = ctrl0;
                armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
@@ -279,7 +304,11 @@ static int armada_plane_disable(struct drm_plane *plane)
 
 static void armada_plane_destroy(struct drm_plane *plane)
 {
-       kfree(plane);
+       struct armada_plane *dplane = drm_to_armada_plane(plane);
+
+       drm_plane_cleanup(plane);
+
+       kfree(dplane);
 }
 
 static int armada_plane_set_property(struct drm_plane *plane,
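The driver-local armada_limit() clamping is replaced by drm_plane_helper_check_update(), which clips the src/dest rectangles against the CRTC and reports whether anything remains visible. A standalone sketch of that clip-and-test step (plain structs, not the drm_rect API):

#include <stdbool.h>
#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

static bool clip_rect(struct rect *r, const struct rect *clip)
{
        if (r->x1 < clip->x1) r->x1 = clip->x1;
        if (r->y1 < clip->y1) r->y1 = clip->y1;
        if (r->x2 > clip->x2) r->x2 = clip->x2;
        if (r->y2 > clip->y2) r->y2 = clip->y2;
        return r->x1 < r->x2 && r->y1 < r->y2;   /* non-empty => visible */
}

int main(void)
{
        struct rect dest = { -100, 50, 200, 300 };        /* partly off-screen */
        const struct rect clip = { 0, 0, 1920, 1080 };    /* mode h/vdisplay */
        bool visible = clip_rect(&dest, &clip);

        /* when !visible, the driver clears CFG_DMA_ENA instead of scanning out */
        printf("visible=%d dest=(%d,%d)-(%d,%d)\n",
               visible, dest.x1, dest.y1, dest.x2, dest.y2);
        return 0;
}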
index f69b92535505b5ae1c899d6f9b08f76501851fa7..5ae5c69231280a5e1c23882289388f9ceb75a701 100644 (file)
@@ -355,6 +355,7 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev)
                planes->overlays[i]->base.possible_crtcs = 1 << crtc->id;
 
        drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs);
+       drm_crtc_vblank_reset(&crtc->base);
 
        dc->crtc = &crtc->base;
 
index 60b0c13d7ff5cc6f4c338c9ed3d7f423d684c84e..6fad1f9648f38870b2162cb74a6320f50c34aabc 100644 (file)
@@ -313,20 +313,20 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
 
        pm_runtime_enable(dev->dev);
 
-       ret = atmel_hlcdc_dc_modeset_init(dev);
+       ret = drm_vblank_init(dev, 1);
        if (ret < 0) {
-               dev_err(dev->dev, "failed to initialize mode setting\n");
+               dev_err(dev->dev, "failed to initialize vblank\n");
                goto err_periph_clk_disable;
        }
 
-       drm_mode_config_reset(dev);
-
-       ret = drm_vblank_init(dev, 1);
+       ret = atmel_hlcdc_dc_modeset_init(dev);
        if (ret < 0) {
-               dev_err(dev->dev, "failed to initialize vblank\n");
+               dev_err(dev->dev, "failed to initialize mode setting\n");
                goto err_periph_clk_disable;
        }
 
+       drm_mode_config_reset(dev);
+
        pm_runtime_get_sync(dev->dev);
        ret = drm_irq_install(dev, dc->hlcdc->irq);
        pm_runtime_put_sync(dev->dev);
index b9ba06176eb1e10c6e4074a7a935a08a50313819..fed748311b928cc534f781505a37686ec7a46ba3 100644 (file)
@@ -2706,8 +2706,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
 
-       /* For some reason crtc x/y offsets are signed internally. */
-       if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
+       /*
+        * Universal plane src offsets are only 16.16, prevent havoc for
+        * drivers using universal plane code internally.
+        */
+       if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
                return -ERANGE;
 
        drm_modeset_lock_all(dev);
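Universal-plane source coordinates are 16.16 fixed point, so a CRTC offset is converted with x << 16 and any value at or above 1 << 16 cannot be represented; hence the new x & 0xffff0000 test. A standalone illustration:

#include <stdint.h>
#include <stdio.h>

static int check_crtc_offset(uint32_t x)
{
        if (x & 0xffff0000)         /* does not fit in 16.16 once shifted */
                return -34;         /* -ERANGE */
        return 0;
}

int main(void)
{
        printf("x=4096  -> %d (src_x = %#x)\n",
               check_crtc_offset(4096), 4096u << 16);
        printf("x=70000 -> %d\n", check_crtc_offset(70000));
        return 0;
}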
@@ -5395,12 +5398,9 @@ void drm_mode_config_reset(struct drm_device *dev)
                if (encoder->funcs->reset)
                        encoder->funcs->reset(encoder);
 
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               connector->status = connector_status_unknown;
-
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
                if (connector->funcs->reset)
                        connector->funcs->reset(connector);
-       }
 }
 EXPORT_SYMBOL(drm_mode_config_reset);
 
index aa8bbb460c5715a619988d6d4350b4a4507173f8..9cfcd0aef0dfacf8389407d78981b78c8c85ddf8 100644 (file)
@@ -70,6 +70,8 @@
 
 #define DRM_IOCTL_WAIT_VBLANK32                DRM_IOWR(0x3a, drm_wait_vblank32_t)
 
+#define DRM_IOCTL_MODE_ADDFB232                DRM_IOWR(0xb8, drm_mode_fb_cmd232_t)
+
 typedef struct drm_version_32 {
        int version_major;        /**< Major version */
        int version_minor;        /**< Minor version */
@@ -1016,6 +1018,63 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
        return 0;
 }
 
+typedef struct drm_mode_fb_cmd232 {
+       u32 fb_id;
+       u32 width;
+       u32 height;
+       u32 pixel_format;
+       u32 flags;
+       u32 handles[4];
+       u32 pitches[4];
+       u32 offsets[4];
+       u64 modifier[4];
+} __attribute__((packed)) drm_mode_fb_cmd232_t;
+
+static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
+                                 unsigned long arg)
+{
+       struct drm_mode_fb_cmd232 __user *argp = (void __user *)arg;
+       struct drm_mode_fb_cmd232 req32;
+       struct drm_mode_fb_cmd2 __user *req64;
+       int i;
+       int err;
+
+       if (copy_from_user(&req32, argp, sizeof(req32)))
+               return -EFAULT;
+
+       req64 = compat_alloc_user_space(sizeof(*req64));
+
+       if (!access_ok(VERIFY_WRITE, req64, sizeof(*req64))
+           || __put_user(req32.width, &req64->width)
+           || __put_user(req32.height, &req64->height)
+           || __put_user(req32.pixel_format, &req64->pixel_format)
+           || __put_user(req32.flags, &req64->flags))
+               return -EFAULT;
+
+       for (i = 0; i < 4; i++) {
+               if (__put_user(req32.handles[i], &req64->handles[i]))
+                       return -EFAULT;
+               if (__put_user(req32.pitches[i], &req64->pitches[i]))
+                       return -EFAULT;
+               if (__put_user(req32.offsets[i], &req64->offsets[i]))
+                       return -EFAULT;
+               if (__put_user(req32.modifier[i], &req64->modifier[i]))
+                       return -EFAULT;
+       }
+
+       err = drm_ioctl(file, DRM_IOCTL_MODE_ADDFB2, (unsigned long)req64);
+       if (err)
+               return err;
+
+       if (__get_user(req32.fb_id, &req64->fb_id))
+               return -EFAULT;
+
+       if (copy_to_user(argp, &req32, sizeof(req32)))
+               return -EFAULT;
+
+       return 0;
+}
+
 static drm_ioctl_compat_t *drm_compat_ioctls[] = {
        [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
        [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
@@ -1048,6 +1107,7 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
        [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
 #endif
        [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
+       [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
 };
 
 /**
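compat_drm_mode_addfb2() exists because the 32-bit userspace layout of the ADDFB2 argument is packed, leaving the u64 modifier[] array at a 4-byte offset, while the native 64-bit struct aligns it to 8 bytes; a whole-struct copy would misread everything from that point on. A standalone sketch of the field-by-field translation (simplified structs, no __user handling):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fb_cmd2_32 {             /* what 32-bit userspace hands in */
        uint32_t fb_id, width, height, pixel_format, flags;
        uint32_t handles[4], pitches[4], offsets[4];
        uint64_t modifier[4];
} __attribute__((packed));

struct fb_cmd2 {                /* native 64-bit layout */
        uint32_t fb_id, width, height, pixel_format, flags;
        uint32_t handles[4], pitches[4], offsets[4];
        uint64_t modifier[4];
};

static void fixup_addfb2(struct fb_cmd2 *out, const struct fb_cmd2_32 *in)
{
        memset(out, 0, sizeof(*out));
        out->width = in->width;
        out->height = in->height;
        out->pixel_format = in->pixel_format;
        out->flags = in->flags;
        for (int i = 0; i < 4; i++) {
                out->handles[i] = in->handles[i];
                out->pitches[i] = in->pitches[i];
                out->offsets[i] = in->offsets[i];
                out->modifier[i] = in->modifier[i];
        }
}

int main(void)
{
        struct fb_cmd2_32 in = { .width = 640, .height = 480 };
        struct fb_cmd2 out;

        fixup_addfb2(&out, &in);
        /* the layouts really do differ: 100 vs 104 bytes on common ABIs */
        printf("packed size %zu, native size %zu, width %u\n",
               sizeof(in), sizeof(out), out.width);
        return 0;
}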
index 542fac628b288225262289f424e92576efb7acb4..fd1de451c8c6bae13f42eaae5e9cfe572039ea18 100644 (file)
@@ -826,6 +826,7 @@ struct intel_context {
        struct kref ref;
        int user_handle;
        uint8_t remap_slice;
+       struct drm_i915_private *i915;
        struct drm_i915_file_private *file_priv;
        struct i915_ctx_hang_stats hang_stats;
        struct i915_hw_ppgtt *ppgtt;
@@ -2036,8 +2037,6 @@ struct drm_i915_gem_object {
        unsigned int cache_level:3;
        unsigned int cache_dirty:1;
 
-       unsigned int has_dma_mapping:1;
-
        unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
 
        unsigned int pin_display;
@@ -3116,7 +3115,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor);
 int i915_debugfs_connector_add(struct drm_connector *connector);
 void intel_display_crc_init(struct drm_device *dev);
 #else
-static inline int i915_debugfs_connector_add(struct drm_connector *connector) {}
+static inline int i915_debugfs_connector_add(struct drm_connector *connector)
+{ return 0; }
 static inline void intel_display_crc_init(struct drm_device *dev) {}
 #endif
 
@@ -3303,15 +3303,14 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
 #define I915_READ64(reg)       dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
 
 #define I915_READ64_2x32(lower_reg, upper_reg) ({                      \
-               u32 upper = I915_READ(upper_reg);                       \
-               u32 lower = I915_READ(lower_reg);                       \
-               u32 tmp = I915_READ(upper_reg);                         \
-               if (upper != tmp) {                                     \
-                       upper = tmp;                                    \
-                       lower = I915_READ(lower_reg);                   \
-                       WARN_ON(I915_READ(upper_reg) != upper);         \
-               }                                                       \
-               (u64)upper << 32 | lower; })
+       u32 upper, lower, tmp;                                          \
+       tmp = I915_READ(upper_reg);                                     \
+       do {                                                            \
+               upper = tmp;                                            \
+               lower = I915_READ(lower_reg);                           \
+               tmp = I915_READ(upper_reg);                             \
+       } while (upper != tmp);                                         \
+       (u64)upper << 32 | lower; })
 
 #define POSTING_READ(reg)      (void)I915_READ_NOTRACE(reg)
 #define POSTING_READ16(reg)    (void)I915_READ16_NOTRACE(reg)
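The rewritten I915_READ64_2x32() handles torn reads of a live 64-bit counter: if the low half carries into the high half between the two 32-bit reads, the high word is simply re-read until it is stable. A standalone simulation of the retry loop:

#include <stdint.h>
#include <stdio.h>

static uint64_t hw_counter;             /* simulated register that ticks on access */
static uint32_t read_lower(void) { hw_counter++; return (uint32_t)hw_counter; }
static uint32_t read_upper(void) { hw_counter++; return (uint32_t)(hw_counter >> 32); }

static uint64_t read64_2x32(void)
{
        uint32_t upper, lower, tmp = read_upper();

        do {
                upper = tmp;
                lower = read_lower();
                tmp = read_upper();
        } while (upper != tmp);         /* retry if the high word moved */

        return (uint64_t)upper << 32 | lower;
}

int main(void)
{
        hw_counter = 0xffffffffULL - 1; /* about to carry into bit 32 */
        printf("consistent read: %#llx\n",
               (unsigned long long)read64_2x32());
        return 0;
}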
index 248fd1ac7b3a04c8b866eeaba327fcd65f67c8a9..52b446b27b4d08359ce50577f53176629e323a64 100644 (file)
@@ -213,7 +213,6 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
        sg_dma_len(sg) = obj->base.size;
 
        obj->pages = st;
-       obj->has_dma_mapping = true;
        return 0;
 }
 
@@ -265,8 +264,6 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
 
        sg_free_table(obj->pages);
        kfree(obj->pages);
-
-       obj->has_dma_mapping = false;
 }
 
 static void
@@ -2139,6 +2136,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
                obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        }
 
+       i915_gem_gtt_finish_object(obj);
+
        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_save_bit_17_swizzle(obj);
 
@@ -2199,6 +2198,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        struct sg_page_iter sg_iter;
        struct page *page;
        unsigned long last_pfn = 0;     /* suppress gcc warning */
+       int ret;
        gfp_t gfp;
 
        /* Assert that the object is not currently in any GPU domain. As it
@@ -2246,8 +2246,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
                         */
                        i915_gem_shrink_all(dev_priv);
                        page = shmem_read_mapping_page(mapping, i);
-                       if (IS_ERR(page))
+                       if (IS_ERR(page)) {
+                               ret = PTR_ERR(page);
                                goto err_pages;
+                       }
                }
 #ifdef CONFIG_SWIOTLB
                if (swiotlb_nr_tbl()) {
@@ -2276,6 +2278,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
                sg_mark_end(sg);
        obj->pages = st;
 
+       ret = i915_gem_gtt_prepare_object(obj);
+       if (ret)
+               goto err_pages;
+
        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_do_bit_17_swizzle(obj);
 
@@ -2300,10 +2306,10 @@ err_pages:
         * space and so want to translate the error from shmemfs back to our
         * usual understanding of ENOMEM.
         */
-       if (PTR_ERR(page) == -ENOSPC)
-               return -ENOMEM;
-       else
-               return PTR_ERR(page);
+       if (ret == -ENOSPC)
+               ret = -ENOMEM;
+
+       return ret;
 }
 
 /* Ensure that the associated pages are gathered from the backing storage
@@ -2542,6 +2548,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
        }
 
        request->emitted_jiffies = jiffies;
+       ring->last_submitted_seqno = request->seqno;
        list_add_tail(&request->list, &ring->request_list);
        request->file_priv = NULL;
 
@@ -3247,10 +3254,8 @@ int i915_vma_unbind(struct i915_vma *vma)
 
        /* Since the unbound list is global, only move to that list if
         * no more VMAs exist. */
-       if (list_empty(&obj->vma_list)) {
-               i915_gem_gtt_finish_object(obj);
+       if (list_empty(&obj->vma_list))
                list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
-       }
 
        /* And finally now the object is completely decoupled from this vma,
         * we can drop its hold on the backing storage and allow it to be
@@ -3768,22 +3773,16 @@ search_free:
                goto err_remove_node;
        }
 
-       ret = i915_gem_gtt_prepare_object(obj);
-       if (ret)
-               goto err_remove_node;
-
        trace_i915_vma_bind(vma, flags);
        ret = i915_vma_bind(vma, obj->cache_level, flags);
        if (ret)
-               goto err_finish_gtt;
+               goto err_remove_node;
 
        list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
        list_add_tail(&vma->mm_list, &vm->inactive_list);
 
        return vma;
 
-err_finish_gtt:
-       i915_gem_gtt_finish_object(obj);
 err_remove_node:
        drm_mm_remove_node(&vma->node);
 err_free_vma:
index d65cbe6afb92d4c1d11b06d9e144eea35f8db56b..48afa777e94aa849e13890be64b3d4303283eee1 100644 (file)
@@ -135,8 +135,7 @@ static int get_context_size(struct drm_device *dev)
 
 void i915_gem_context_free(struct kref *ctx_ref)
 {
-       struct intel_context *ctx = container_of(ctx_ref,
-                                                typeof(*ctx), ref);
+       struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
 
        trace_i915_context_free(ctx);
 
@@ -195,6 +194,7 @@ __create_hw_context(struct drm_device *dev,
 
        kref_init(&ctx->ref);
        list_add_tail(&ctx->link, &dev_priv->context_list);
+       ctx->i915 = dev_priv;
 
        if (dev_priv->hw_context_size) {
                struct drm_i915_gem_object *obj =
index 7998da27c500744c78fd0897193b19f778244ca7..e9c2bfd85b5268425ce9465ee2d23d99ee92c534 100644 (file)
@@ -256,7 +256,6 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
                return PTR_ERR(sg);
 
        obj->pages = sg;
-       obj->has_dma_mapping = true;
        return 0;
 }
 
@@ -264,7 +263,6 @@ static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
 {
        dma_buf_unmap_attachment(obj->base.import_attach,
                                 obj->pages, DMA_BIDIRECTIONAL);
-       obj->has_dma_mapping = false;
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
index dcc6a88c560ebaf2b788971fe36984bd7402cf92..31e8269e6e3dab33d809f693f7cc6ce8cf318975 100644 (file)
@@ -1723,9 +1723,6 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
 
 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 {
-       if (obj->has_dma_mapping)
-               return 0;
-
        if (!dma_map_sg(&obj->base.dev->pdev->dev,
                        obj->pages->sgl, obj->pages->nents,
                        PCI_DMA_BIDIRECTIONAL))
@@ -1926,6 +1923,17 @@ static int ggtt_bind_vma(struct i915_vma *vma,
                vma->vm->insert_entries(vma->vm, pages,
                                        vma->node.start,
                                        cache_level, pte_flags);
+
+               /* Note the inconsistency here is due to absence of the
+                * aliasing ppgtt on gen4 and earlier. Though we always
+                * request PIN_USER for execbuffer (translated to LOCAL_BIND),
+                * without the appgtt, we cannot honour that request and so
+                * must substitute it with a global binding. Since we do this
+                * behind the upper layers' back, we need to explicitly set
+                * the bound flag ourselves.
+                */
+               vma->bound |= GLOBAL_BIND;
+
        }
 
        if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) {
@@ -1972,10 +1980,8 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
 
        interruptible = do_idling(dev_priv);
 
-       if (!obj->has_dma_mapping)
-               dma_unmap_sg(&dev->pdev->dev,
-                            obj->pages->sgl, obj->pages->nents,
-                            PCI_DMA_BIDIRECTIONAL);
+       dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
+                    PCI_DMA_BIDIRECTIONAL);
 
        undo_idling(dev_priv, interruptible);
 }
index 348ed5abcdbf6d9745193254952da8f85a8c6909..8b5b784c62fea276fc9bc947a85734f75f81a9eb 100644 (file)
@@ -416,7 +416,6 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
        if (obj->pages == NULL)
                goto cleanup;
 
-       obj->has_dma_mapping = true;
        i915_gem_object_pin_pages(obj);
        obj->stolen = stolen;
 
index d61e74a08f829b1491b61e4557c689546dce9c3f..d19c9db5e18c9d57057ad78ffdbdfa1a65b861b4 100644 (file)
@@ -183,18 +183,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
                if (IS_GEN4(dev)) {
                        uint32_t ddc2 = I915_READ(DCC2);
 
-                       if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) {
-                               /* Since the swizzling may vary within an
-                                * object, we have no idea what the swizzling
-                                * is for any page in particular. Thus we
-                                * cannot migrate tiled pages using the GPU,
-                                * nor can we tell userspace what the exact
-                                * swizzling is for any object.
-                                */
+                       if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
                                dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
-                               swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
-                               swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
-                       }
                }
 
                if (dcc == 0xffffffff) {
@@ -474,7 +464,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
        }
 
        /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
-       args->phys_swizzle_mode = args->swizzle_mode;
+       if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+               args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
+       else
+               args->phys_swizzle_mode = args->swizzle_mode;
        if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
                args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
        if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
index 1f4e5a32a16e568cae0cc340c39482d13af7ef41..8fd431bcdfd3a33ffb6afda7a1584b44e33d8296 100644 (file)
@@ -545,6 +545,26 @@ err:
        return ret;
 }
 
+static int
+__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
+                            struct page **pvec, int num_pages)
+{
+       int ret;
+
+       ret = st_set_pages(&obj->pages, pvec, num_pages);
+       if (ret)
+               return ret;
+
+       ret = i915_gem_gtt_prepare_object(obj);
+       if (ret) {
+               sg_free_table(obj->pages);
+               kfree(obj->pages);
+               obj->pages = NULL;
+       }
+
+       return ret;
+}
+
 static void
 __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 {
@@ -584,9 +604,12 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
        if (obj->userptr.work != &work->work) {
                ret = 0;
        } else if (pinned == num_pages) {
-               ret = st_set_pages(&obj->pages, pvec, num_pages);
+               ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
                if (ret == 0) {
                        list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
+                       obj->get_page.sg = obj->pages->sgl;
+                       obj->get_page.last = 0;
+
                        pinned = 0;
                }
        }
@@ -693,7 +716,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
                        }
                }
        } else {
-               ret = st_set_pages(&obj->pages, pvec, num_pages);
+               ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
                if (ret == 0) {
                        obj->userptr.work = NULL;
                        pinned = 0;
@@ -715,6 +738,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
        if (obj->madv != I915_MADV_WILLNEED)
                obj->dirty = 0;
 
+       i915_gem_gtt_finish_object(obj);
+
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
                struct page *page = sg_page_iter_page(&sg_iter);
 
index 176de6322e4d0039482422bc1210ff89a1af769b..23aa04cded6b013d6e78c046926cc63d03dae628 100644 (file)
@@ -204,7 +204,7 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        drm_ioctl_compat_t *fn = NULL;
        int ret;
 
-       if (nr < DRM_COMMAND_BASE)
+       if (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END)
                return drm_compat_ioctl(filp, cmd, arg);
 
        if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
index e6bb72dca3ffb15ae732aac0291dc13806eaba92..984e2fe6688c4c2cabd8829fe2e5cad97e88770e 100644 (file)
@@ -2706,18 +2706,11 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
-static struct drm_i915_gem_request *
-ring_last_request(struct intel_engine_cs *ring)
-{
-       return list_entry(ring->request_list.prev,
-                         struct drm_i915_gem_request, list);
-}
-
 static bool
-ring_idle(struct intel_engine_cs *ring)
+ring_idle(struct intel_engine_cs *ring, u32 seqno)
 {
        return (list_empty(&ring->request_list) ||
-               i915_gem_request_completed(ring_last_request(ring), false));
+               i915_seqno_passed(seqno, ring->last_submitted_seqno));
 }
 
 static bool
@@ -2939,7 +2932,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                acthd = intel_ring_get_active_head(ring);
 
                if (ring->hangcheck.seqno == seqno) {
-                       if (ring_idle(ring)) {
+                       if (ring_idle(ring, seqno)) {
                                ring->hangcheck.action = HANGCHECK_IDLE;
 
                                if (waitqueue_active(&ring->irq_queue)) {
index 497cba5deb1e9db162a6a59c0993ca4c4712e649..849a2590e010ca1d10de85fea06cbb41211ad2d8 100644 (file)
@@ -727,7 +727,7 @@ DECLARE_EVENT_CLASS(i915_context,
        TP_fast_assign(
                        __entry->ctx = ctx;
                        __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
-                       __entry->dev = ctx->file_priv->dev_priv->dev->primary->index;
+                       __entry->dev = ctx->i915->dev->primary->index;
        ),
 
        TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
index 647b1404c441374beba20b32be37192d0b0ace95..30e0f54ba19d1284107958bb6e5d49f6309b63de 100644 (file)
@@ -6315,9 +6315,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
        struct drm_connector *connector;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       /* crtc should still be enabled when we disable it. */
-       WARN_ON(!crtc->state->enable);
-
        intel_crtc_disable_planes(crtc);
        dev_priv->display.crtc_disable(crtc);
        dev_priv->display.off(crtc);
@@ -12591,7 +12588,8 @@ static int __intel_set_mode(struct drm_crtc *modeset_crtc,
                        continue;
 
                if (!crtc_state->enable) {
-                       intel_crtc_disable(crtc);
+                       if (crtc->state->enable)
+                               intel_crtc_disable(crtc);
                } else if (crtc->state->enable) {
                        intel_crtc_disable_planes(crtc);
                        dev_priv->display.crtc_disable(crtc);
@@ -13276,7 +13274,7 @@ intel_check_primary_plane(struct drm_plane *plane,
        if (ret)
                return ret;
 
-       if (intel_crtc->active) {
+       if (crtc_state ? crtc_state->base.active : intel_crtc->active) {
                struct intel_plane_state *old_state =
                        to_intel_plane_state(plane->state);
 
index e539314ae87e0280b3d1195c380c4e3764e7c330..4be66f60504d13661f07886cb891fd9417da754f 100644 (file)
@@ -275,6 +275,13 @@ struct  intel_engine_cs {
         * Do we have some not yet emitted requests outstanding?
         */
        struct drm_i915_gem_request *outstanding_lazy_request;
+       /**
+        * Seqno of request most recently submitted to request_list.
+        * Used exclusively by hang checker to avoid grabbing lock while
+        * inspecting request list.
+        */
+       u32 last_submitted_seqno;
+
        bool gpu_caches_dirty;
 
        wait_queue_head_t irq_queue;
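last_submitted_seqno lets the hang checker test ring idleness without taking the lock that protects request_list; all it needs is a wrap-safe seqno comparison, which i915_seqno_passed() performs on the signed 32-bit difference. A standalone sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;     /* wrap-safe ordering */
}

static bool ring_idle(uint32_t completed, uint32_t last_submitted)
{
        return seqno_passed(completed, last_submitted);
}

int main(void)
{
        /* near the wrap point: 0x00000002 is "after" 0xfffffffe */
        printf("idle=%d\n", ring_idle(0x00000002u, 0xfffffffeu)); /* 1 */
        printf("idle=%d\n", ring_idle(0xfffffffeu, 0x00000002u)); /* 0 */
        return 0;
}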
index a6d8a3ee7750adecb552415c8ffab06d599f46ff..260389acfb7752d01ce0ebd59597f06a671b2ea1 100644 (file)
@@ -1274,10 +1274,12 @@ int i915_reg_read_ioctl(struct drm_device *dev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_reg_read *reg = data;
        struct register_whitelist const *entry = whitelist;
+       unsigned size;
+       u64 offset;
        int i, ret = 0;
 
        for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
-               if (entry->offset == reg->offset &&
+               if (entry->offset == (reg->offset & -entry->size) &&
                    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
                        break;
        }
@@ -1285,23 +1287,33 @@ int i915_reg_read_ioctl(struct drm_device *dev,
        if (i == ARRAY_SIZE(whitelist))
                return -EINVAL;
 
+       /* We use the low bits to encode extra flags as the register should
+        * be naturally aligned (and those that are not so aligned merely
+        * limit the available flags for that register).
+        */
+       offset = entry->offset;
+       size = entry->size;
+       size |= reg->offset ^ offset;
+
        intel_runtime_pm_get(dev_priv);
 
-       switch (entry->size) {
+       switch (size) {
+       case 8 | 1:
+               reg->val = I915_READ64_2x32(offset, offset+4);
+               break;
        case 8:
-               reg->val = I915_READ64(reg->offset);
+               reg->val = I915_READ64(offset);
                break;
        case 4:
-               reg->val = I915_READ(reg->offset);
+               reg->val = I915_READ(offset);
                break;
        case 2:
-               reg->val = I915_READ16(reg->offset);
+               reg->val = I915_READ16(offset);
                break;
        case 1:
-               reg->val = I915_READ8(reg->offset);
+               reg->val = I915_READ8(offset);
                break;
        default:
-               MISSING_CASE(entry->size);
                ret = -EINVAL;
                goto out;
        }
index 214eceefc981e4018b9aeb262ad2ebbc4b4abfa0..e671ad3694166041ce76c4eaa57539897eba81d1 100644 (file)
@@ -301,7 +301,7 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
 
        switch (tve->mode) {
        case TVE_MODE_VGA:
-               imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_YUV8_1X24,
+               imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_GBR888_1X24,
                                            tve->hsync_pin, tve->vsync_pin);
                break;
        case TVE_MODE_TVOUT:
index 74a9ce40ddc457f476611478d87f14ec2f67d8fd..b4deb9cf9d71613fa49562c4619cf8ffec5a33f3 100644 (file)
@@ -21,6 +21,7 @@
 #include <drm/drm_panel.h>
 #include <linux/videodev2.h>
 #include <video/of_display_timing.h>
+#include <linux/of_graph.h>
 
 #include "imx-drm.h"
 
@@ -208,7 +209,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
 {
        struct drm_device *drm = data;
        struct device_node *np = dev->of_node;
-       struct device_node *panel_node;
+       struct device_node *port;
        const u8 *edidp;
        struct imx_parallel_display *imxpd;
        int ret;
@@ -234,11 +235,19 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
                        imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
        }
 
-       panel_node = of_parse_phandle(np, "fsl,panel", 0);
-       if (panel_node) {
-               imxpd->panel = of_drm_find_panel(panel_node);
-               if (!imxpd->panel)
-                       return -EPROBE_DEFER;
+       /* port@1 is the output port */
+       port = of_graph_get_port_by_id(np, 1);
+       if (port) {
+               struct device_node *endpoint, *remote;
+
+               endpoint = of_get_child_by_name(port, "endpoint");
+               if (endpoint) {
+                       remote = of_graph_get_remote_port_parent(endpoint);
+                       if (remote)
+                               imxpd->panel = of_drm_find_panel(remote);
+                       if (!imxpd->panel)
+                               return -EPROBE_DEFER;
+               }
        }
 
        imxpd->dev = dev;
index 0d1dbb73793355043d514f2c4239e16386671e00..247a424445f75bb4d27573e0d9ac208262a6ac97 100644 (file)
@@ -220,13 +220,15 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
        uint32_t op_mode = 0;
        uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
        uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
-       enum mdp4_frame_format frame_type = mdp4_get_frame_format(fb);
+       enum mdp4_frame_format frame_type;
 
        if (!(crtc && fb)) {
                DBG("%s: disabled!", mdp4_plane->name);
                return 0;
        }
 
+       frame_type = mdp4_get_frame_format(fb);
+
        /* src values are in Q16 fixed point, convert to integer: */
        src_x = src_x >> 16;
        src_y = src_y >> 16;
index 206f758f7d64849af986e6c70589bf96d7b0cb7b..e253db5de5aa5955a2841f9ff8cdc63e5b519fac 100644 (file)
@@ -76,7 +76,20 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
 
 static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
+       int i;
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+       int nplanes = mdp5_kms->dev->mode_config.num_total_plane;
+
+       for (i = 0; i < nplanes; i++) {
+               struct drm_plane *plane = state->planes[i];
+               struct drm_plane_state *plane_state = state->plane_states[i];
+
+               if (!plane)
+                       continue;
+
+               mdp5_plane_complete_commit(plane, plane_state);
+       }
+
        mdp5_disable(mdp5_kms);
 }
 
index e0eb24587c84d7887a69c47c0428ad3f0d4f1396..e79ac09b72168c4f2af8be041d541736a81223fc 100644 (file)
@@ -227,6 +227,8 @@ void mdp5_plane_install_properties(struct drm_plane *plane,
                struct drm_mode_object *obj);
 uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
 void mdp5_plane_complete_flip(struct drm_plane *plane);
+void mdp5_plane_complete_commit(struct drm_plane *plane,
+       struct drm_plane_state *state);
 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp5_plane_init(struct drm_device *dev,
                enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
index 57b8f56ae9d06fb458266181a8344858e381e6b5..22275568ab8be3a500c82f138b4c4bb04a088de4 100644 (file)
@@ -31,8 +31,6 @@ struct mdp5_plane {
 
        uint32_t nformats;
        uint32_t formats[32];
-
-       bool enabled;
 };
 #define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
 
@@ -56,22 +54,6 @@ static bool plane_enabled(struct drm_plane_state *state)
        return state->fb && state->crtc;
 }
 
-static int mdp5_plane_disable(struct drm_plane *plane)
-{
-       struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
-       struct mdp5_kms *mdp5_kms = get_kms(plane);
-       enum mdp5_pipe pipe = mdp5_plane->pipe;
-
-       DBG("%s: disable", mdp5_plane->name);
-
-       if (mdp5_kms) {
-               /* Release the memory we requested earlier from the SMP: */
-               mdp5_smp_release(mdp5_kms->smp, pipe);
-       }
-
-       return 0;
-}
-
 static void mdp5_plane_destroy(struct drm_plane *plane)
 {
        struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
@@ -224,7 +206,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
 
        if (!plane_enabled(state)) {
                to_mdp5_plane_state(state)->pending = true;
-               mdp5_plane_disable(plane);
        } else if (to_mdp5_plane_state(state)->mode_changed) {
                int ret;
                to_mdp5_plane_state(state)->pending = true;
@@ -602,6 +583,20 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
        return mdp5_plane->flush_mask;
 }
 
+/* called after vsync in thread context */
+void mdp5_plane_complete_commit(struct drm_plane *plane,
+       struct drm_plane_state *state)
+{
+       struct mdp5_kms *mdp5_kms = get_kms(plane);
+       struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
+       enum mdp5_pipe pipe = mdp5_plane->pipe;
+
+       if (!plane_enabled(plane->state)) {
+               DBG("%s: free SMP", mdp5_plane->name);
+               mdp5_smp_release(mdp5_kms->smp, pipe);
+       }
+}
+
 /* initialize plane */
 struct drm_plane *mdp5_plane_init(struct drm_device *dev,
                enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
index 16702aecf0df714e211b8d7900fc06299e0e92f8..64a27d86f2f521444469b29e70ca9861f17d09db 100644 (file)
  * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
  *
  * For each block that can be dynamically allocated, it can be either
- * free, or pending/in-use by a client. The updates happen in three steps:
+ *     free:
+ *     The block is free.
+ *
+ *     pending:
+ *     The block is allocated to some client and not free.
+ *
+ *     configured:
+ *     The block is allocated to some client, and assigned to that
+ *     client in MDP5_MDP_SMP_ALLOC registers.
+ *
+ *     inuse:
+ *     The block is being actively used by a client.
+ *
+ * The updates happen in the following steps:
  *
  *  1) mdp5_smp_request():
  *     When plane scanout is setup, calculate required number of
- *     blocks needed per client, and request.  Blocks not inuse or
- *     pending by any other client are added to client's pending
- *     set.
+ *     blocks needed per client, and request. Blocks neither inuse nor
+ *     configured nor pending by any other client are added to client's
+ *     pending set.
+ *     For shrinking, blocks in pending but not in configured can be freed
+ *     directly, but those already in configured will be freed later by
+ *     mdp5_smp_commit.
  *
  *  2) mdp5_smp_configure():
  *     As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
  *     are configured for the union(pending, inuse)
+ *     Current pending is copied to configured.
+ *     It is assumed that mdp5_smp_request and mdp5_smp_configure not run
+ *     concurrently for the same pipe.
  *
  *  3) mdp5_smp_commit():
- *     After next vblank, copy pending -> inuse.  Optionally update
+ *     After next vblank, copy configured -> inuse.  Optionally update
  *     MDP5_SMP_ALLOC registers if there are newly unused blocks
  *
+ *  4) mdp5_smp_release():
+ *     Must be called after the pipe is disabled and no longer uses any SMB
+ *
  * On the next vblank after changes have been committed to hw, the
 * client's pending blocks become its in-use blocks (and no-longer
  * in-use blocks become available to other clients).
@@ -77,6 +99,9 @@ struct mdp5_smp {
        struct mdp5_client_smp_state client_state[MAX_CLIENTS];
 };
 
+static void update_smp_state(struct mdp5_smp *smp,
+               u32 cid, mdp5_smp_state_t *assigned);
+
 static inline
 struct mdp5_kms *get_kms(struct mdp5_smp *smp)
 {
@@ -149,7 +174,12 @@ static int smp_request_block(struct mdp5_smp *smp,
                for (i = cur_nblks; i > nblks; i--) {
                        int blk = find_first_bit(ps->pending, cnt);
                        clear_bit(blk, ps->pending);
-                       /* don't clear in global smp_state until _commit() */
+
+                       /* clear in global smp_state if not in configured;
+                        * otherwise defer clearing until _commit()
+                        */
+                       if (!test_bit(blk, ps->configured))
+                               clear_bit(blk, smp->state);
                }
        }
 
@@ -223,10 +253,33 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid
 /* Release SMP blocks for all clients of the pipe */
 void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
 {
-       int i, nblks;
+       int i;
+       unsigned long flags;
+       int cnt = smp->blk_cnt;
+
+       for (i = 0; i < pipe2nclients(pipe); i++) {
+               mdp5_smp_state_t assigned;
+               u32 cid = pipe2client(pipe, i);
+               struct mdp5_client_smp_state *ps = &smp->client_state[cid];
+
+               spin_lock_irqsave(&smp->state_lock, flags);
+
+               /* clear hw assignment */
+               bitmap_or(assigned, ps->inuse, ps->configured, cnt);
+               update_smp_state(smp, CID_UNUSED, &assigned);
+
+               /* free to global pool */
+               bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
+               bitmap_andnot(smp->state, smp->state, assigned, cnt);
+
+               /* clear the client's info */
+               bitmap_zero(ps->pending, cnt);
+               bitmap_zero(ps->configured, cnt);
+               bitmap_zero(ps->inuse, cnt);
+
+               spin_unlock_irqrestore(&smp->state_lock, flags);
+       }
 
-       for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
-               smp_request_block(smp, pipe2client(pipe, i), 0);
        set_fifo_thresholds(smp, pipe, 0);
 }
 
@@ -274,12 +327,20 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
                u32 cid = pipe2client(pipe, i);
                struct mdp5_client_smp_state *ps = &smp->client_state[cid];
 
-               bitmap_or(assigned, ps->inuse, ps->pending, cnt);
+               /*
+                * if vblank has not happened since last smp_configure
+                * skip the configure for now
+                */
+               if (!bitmap_equal(ps->inuse, ps->configured, cnt))
+                       continue;
+
+               bitmap_copy(ps->configured, ps->pending, cnt);
+               bitmap_or(assigned, ps->inuse, ps->configured, cnt);
                update_smp_state(smp, cid, &assigned);
        }
 }
 
-/* step #3: after vblank, copy pending -> inuse: */
+/* step #3: after vblank, copy configured -> inuse: */
 void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
 {
        int cnt = smp->blk_cnt;
@@ -295,7 +356,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
                 * using, which can be released and made available to other
                 * clients:
                 */
-               if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
+               if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
                        unsigned long flags;
 
                        spin_lock_irqsave(&smp->state_lock, flags);
@@ -306,7 +367,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
                        update_smp_state(smp, CID_UNUSED, &released);
                }
 
-               bitmap_copy(ps->inuse, ps->pending, cnt);
+               bitmap_copy(ps->inuse, ps->configured, cnt);
        }
 }
 
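The reworked SMP code tracks each client's blocks through three bitmaps, pending to configured to inuse, exactly as the long comment at the top of the file describes; release clears all three and returns the blocks to the pool. A standalone sketch of the lifecycle, with 64-bit masks standing in for mdp5_smp_state_t:

#include <stdint.h>
#include <stdio.h>

struct client_state {
        uint64_t pending;       /* requested, not yet written to ALLOC regs */
        uint64_t configured;    /* written to ALLOC regs, not yet past vblank */
        uint64_t inuse;         /* actively scanned out */
};

static void smp_configure(struct client_state *ps)
{
        /* skip if the previous configure has not been committed yet */
        if (ps->inuse != ps->configured)
                return;
        ps->configured = ps->pending;
}

static void smp_commit(struct client_state *ps)
{
        uint64_t released = ps->inuse & ~ps->configured;

        if (released)
                printf("returning blocks %#llx to the pool\n",
                       (unsigned long long)released);
        ps->inuse = ps->configured;
}

int main(void)
{
        struct client_state ps = { .pending = 0xf };  /* request blocks 0-3 */

        smp_configure(&ps);     /* pending -> configured */
        smp_commit(&ps);        /* after vblank: configured -> inuse */

        ps.pending = 0x3;       /* shrink to blocks 0-1 */
        smp_configure(&ps);
        smp_commit(&ps);        /* blocks 2-3 (0xc) go back to the pool */
        return 0;
}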
index e47179f635852a2f6ee546378200ce0a54a64499..5b6c2363f59280266a58a407a17c91b00eb8d9a3 100644 (file)
@@ -23,6 +23,7 @@
 
 struct mdp5_client_smp_state {
        mdp5_smp_state_t inuse;
+       mdp5_smp_state_t configured;
        mdp5_smp_state_t pending;
 };
 
index 1b22d8bfe142097f507435613b5bbd25311d4700..1ceb4f22dd8997a7b4e772d82e646abb1a87c7ff 100644 (file)
@@ -283,12 +283,8 @@ int msm_atomic_commit(struct drm_device *dev,
 
        timeout = ktime_add_ms(ktime_get(), 1000);
 
-       ret = msm_wait_fence_interruptable(dev, c->fence, &timeout);
-       if (ret) {
-               WARN_ON(ret);  // TODO unswap state back?  or??
-               commit_destroy(c);
-               return ret;
-       }
+       /* uninterruptible wait */
+       msm_wait_fence(dev, c->fence, &timeout, false);
 
        complete_commit(c);
 
index b7ef56ed8d1cf5280f942d83d2305672b414e0d8..d3467b115e0482a6eca2ebd6a5810e31f69220fa 100644 (file)
@@ -637,8 +637,8 @@ static void msm_debugfs_cleanup(struct drm_minor *minor)
  * Fences:
  */
 
-int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
-               ktime_t *timeout)
+int msm_wait_fence(struct drm_device *dev, uint32_t fence,
+               ktime_t *timeout, bool interruptible)
 {
        struct msm_drm_private *priv = dev->dev_private;
        int ret;
@@ -667,7 +667,12 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
                        remaining_jiffies = timespec_to_jiffies(&ts);
                }
 
-               ret = wait_event_interruptible_timeout(priv->fence_event,
+               if (interruptible)
+                       ret = wait_event_interruptible_timeout(priv->fence_event,
+                               fence_completed(dev, fence),
+                               remaining_jiffies);
+               else
+                       ret = wait_event_timeout(priv->fence_event,
                                fence_completed(dev, fence),
                                remaining_jiffies);
 
@@ -853,7 +858,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       return msm_wait_fence_interruptable(dev, args->fence, &timeout);
+       return msm_wait_fence(dev, args->fence, &timeout, true);
 }
 
 static const struct drm_ioctl_desc msm_ioctls[] = {
index e7c5ea125d45ed42ebaa043522a88b4bcb2c2e3f..4ff0ec9c994b33f84421bd800e88abd8f7b3834a 100644 (file)
@@ -164,8 +164,8 @@ int msm_atomic_commit(struct drm_device *dev,
 
 int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
 
-int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
-               ktime_t *timeout);
+int msm_wait_fence(struct drm_device *dev, uint32_t fence,
+               ktime_t *timeout, bool interruptible);
 int msm_queue_fence_cb(struct drm_device *dev,
                struct msm_fence_cb *cb, uint32_t fence);
 void msm_update_fence(struct drm_device *dev, uint32_t fence);
index f211b80e3a1e0604489b1ed91a65e4e1b489b894..c76cc853b08a57effec626b8c6f537b270ca61ac 100644 (file)
@@ -460,7 +460,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
                if (op & MSM_PREP_NOSYNC)
                        timeout = NULL;
 
-               ret = msm_wait_fence_interruptable(dev, fence, timeout);
+               ret = msm_wait_fence(dev, fence, timeout, true);
        }
 
        /* TODO cache maintenance */
index dd7a7ab603e2c202ea297575fab0aba003234f5b..831461bc98a549e8a3627cbd1cf5a1b4a3250c7b 100644 (file)
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
-       BUG_ON(!msm_obj->sgt);  /* should have already pinned! */
-       return msm_obj->sgt;
+       int npages = obj->size >> PAGE_SHIFT;
+
+       if (WARN_ON(!msm_obj->pages))  /* should have already pinned! */
+               return NULL;
+
+       return drm_prime_pages_to_sg(msm_obj->pages, npages);
 }
 
 void *msm_gem_prime_vmap(struct drm_gem_object *obj)
index 649024d4daf1da6e5aa1e074e303ba02ec312de0..477cbb12809b029c2de6d62bdd0bcb3ba415001c 100644 (file)
@@ -128,6 +128,7 @@ nouveau_cli_destroy(struct nouveau_cli *cli)
        nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
        nvif_client_fini(&cli->base);
        usif_client_fini(cli);
+       kfree(cli);
 }
 
 static void
@@ -865,8 +866,10 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
 
        pm_runtime_get_sync(dev->dev);
 
+       mutex_lock(&cli->mutex);
        if (cli->abi16)
                nouveau_abi16_fini(cli->abi16);
+       mutex_unlock(&cli->mutex);
 
        mutex_lock(&drm->client.mutex);
        list_del(&cli->head);
index 775277f1edb0a4ae4c1a2862418827878710ddb8..dcfbbfaf1739781724e312a4fc15cded1298358d 100644 (file)
@@ -92,6 +92,8 @@ static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
        return 0;
 }
 
+#if IS_ENABLED(CONFIG_IOMMU_API)
+
 static void nouveau_platform_probe_iommu(struct device *dev,
                                         struct nouveau_platform_gpu *gpu)
 {
@@ -158,6 +160,20 @@ static void nouveau_platform_remove_iommu(struct device *dev,
        }
 }
 
+#else
+
+static void nouveau_platform_probe_iommu(struct device *dev,
+                                        struct nouveau_platform_gpu *gpu)
+{
+}
+
+static void nouveau_platform_remove_iommu(struct device *dev,
+                                         struct nouveau_platform_gpu *gpu)
+{
+}
+
+#endif
+
 static int nouveau_platform_probe(struct platform_device *pdev)
 {
        struct nouveau_platform_gpu *gpu;
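Wrapping the IOMMU helpers in IS_ENABLED(CONFIG_IOMMU_API) and adding empty stubs keeps nouveau_platform_probe() free of #ifdefs: the call sites compile unchanged whether or not the feature exists. A standalone sketch of the stub pattern (HAVE_IOMMU stands in for the Kconfig symbol):

#include <stdio.h>

#define HAVE_IOMMU 0            /* flip to 1 to "enable" the feature */

#if HAVE_IOMMU
static void probe_iommu(void)  { printf("attaching to IOMMU\n"); }
static void remove_iommu(void) { printf("detaching from IOMMU\n"); }
#else
static void probe_iommu(void)  { }      /* compiled out: do nothing */
static void remove_iommu(void) { }
#endif

int main(void)
{
        probe_iommu();          /* caller is oblivious to the config */
        remove_iommu();
        return 0;
}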
index 18f4497157885a897a9cbec14c390d397a02ca25..7464aef34674965bbf077570b703abf5bf2630e5 100644 (file)
@@ -175,15 +175,24 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
        node->page_shift = 12;
 
        switch (drm->device.info.family) {
+       case NV_DEVICE_INFO_V0_TNT:
+       case NV_DEVICE_INFO_V0_CELSIUS:
+       case NV_DEVICE_INFO_V0_KELVIN:
+       case NV_DEVICE_INFO_V0_RANKINE:
+       case NV_DEVICE_INFO_V0_CURIE:
+               break;
        case NV_DEVICE_INFO_V0_TESLA:
                if (drm->device.info.chipset != 0x50)
                        node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
                break;
        case NV_DEVICE_INFO_V0_FERMI:
        case NV_DEVICE_INFO_V0_KEPLER:
+       case NV_DEVICE_INFO_V0_MAXWELL:
                node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
                break;
        default:
+               NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
+                       drm->device.info.family);
                break;
        }
 
index 4ef602c5469d2563ee89d4153f0315abdc485a20..495c57644ced91e4a45c7b8b6205fff7d32ff8ae 100644 (file)
@@ -203,7 +203,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
        if (ret)
                return ret;
 
-       if (RING_SPACE(chan, 49)) {
+       if (RING_SPACE(chan, 49 + (device->info.chipset >= 0x11 ? 4 : 0))) {
                nouveau_fbcon_gpu_lockup(info);
                return 0;
        }
index 7da7958556a3abe68f94eac820c62aa8b41a923a..981342d142ff61b6c6292fc14399eb7a43ad08a6 100644 (file)
@@ -979,7 +979,7 @@ nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
 {
        struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
 
-       if (show && nv_crtc->cursor.nvbo)
+       if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled)
                nv50_crtc_cursor_show(nv_crtc);
        else
                nv50_crtc_cursor_hide(nv_crtc);
index 394c89abcc97d92cf1b0352ea6e060b9ee011027..901130b0607291da4d08401f82e6ed05688c84b5 100644 (file)
@@ -188,7 +188,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
        if (ret)
                return ret;
 
-       ret = RING_SPACE(chan, 59);
+       ret = RING_SPACE(chan, 58);
        if (ret) {
                nouveau_fbcon_gpu_lockup(info);
                return ret;
@@ -252,6 +252,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
        OUT_RING(chan, info->var.yres_virtual);
        OUT_RING(chan, upper_32_bits(fb->vma.offset));
        OUT_RING(chan, lower_32_bits(fb->vma.offset));
+       FIRE_RING(chan);
 
        return 0;
 }
index 61246677e8dcdd901119a84d5bc4e6c09287978b..fcd2e5f27bb9539ba113e790222713b10127f825 100644 (file)
@@ -188,7 +188,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
                return -EINVAL;
        }
 
-       ret = RING_SPACE(chan, 60);
+       ret = RING_SPACE(chan, 58);
        if (ret) {
                WARN_ON(1);
                nouveau_fbcon_gpu_lockup(info);
index 9ef6728c528d999ef625a9c1472de95c0c02ba24..7f2f05f78cc8cb7a17e7c7f2445c741981c9389a 100644 (file)
@@ -809,7 +809,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
                case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
                default:
                        nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
-                       return 0x0000;
+                       return NULL;
                }
        }
 
index e10f9644140f5d9fcd6e73446c74634d2b13906a..52c22b02600598cfa7d18e424d69a99cce4879e7 100644 (file)
@@ -165,15 +165,31 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
        return 0;
 }
 
+static int
+gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
+{
+       struct nvkm_object *obj = (void *)chan;
+       struct gk104_fifo_priv *priv = (void *)obj->engine;
+
+       nv_wr32(priv, 0x002634, chan->base.chid);
+       if (!nv_wait(priv, 0x002634, 0x100000, 0x000000)) {
+               nv_error(priv, "channel %d [%s] kick timeout\n",
+                        chan->base.chid, nvkm_client_name(chan));
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
 static int
 gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
                          struct nvkm_object *object)
 {
        struct nvkm_bar *bar = nvkm_bar(parent);
-       struct gk104_fifo_priv *priv = (void *)parent->engine;
        struct gk104_fifo_base *base = (void *)parent->parent;
        struct gk104_fifo_chan *chan = (void *)parent;
        u32 addr;
+       int ret;
 
        switch (nv_engidx(object->engine)) {
        case NVDEV_ENGINE_SW    : return 0;
@@ -188,13 +204,9 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
                return -EINVAL;
        }
 
-       nv_wr32(priv, 0x002634, chan->base.chid);
-       if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
-               nv_error(priv, "channel %d [%s] kick timeout\n",
-                        chan->base.chid, nvkm_client_name(chan));
-               if (suspend)
-                       return -EBUSY;
-       }
+       ret = gk104_fifo_chan_kick(chan);
+       if (ret && suspend)
+               return ret;
 
        if (addr) {
                nv_wo32(base, addr + 0x00, 0x00000000);
@@ -319,6 +331,7 @@ gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
                gk104_fifo_runlist_update(priv, chan->engine);
        }
 
+       gk104_fifo_chan_kick(chan);
        nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
        return nvkm_fifo_channel_fini(&chan->base, suspend);
 }
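
Hoisting the kick into gk104_fifo_chan_kick() lets the detach path and the new fini call share one write-then-poll sequence, and fini now kicks unconditionally. A generic sketch of that shape (register offsets follow the hunk above; the polling helper and explicit deadline are illustrative, the real code uses nv_wait()):

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/jiffies.h>

    /* Sketch: trigger the preempt, then poll the pending bit with a bound. */
    static int example_kick_and_wait(void __iomem *regs, u32 chid)
    {
            unsigned long deadline = jiffies + msecs_to_jiffies(2000);

            writel(chid, regs + 0x002634);
            while (readl(regs + 0x002634) & 0x100000) {
                    if (time_after(jiffies, deadline))
                            return -EBUSY;          /* kick timed out */
                    udelay(1);
            }
            return 0;
    }
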
index 5606c25e5d02998fc415a5b9705d9a40868f2a10..ca11ddb6ed467588ebe5cb98e359b91e07da92c4 100644 (file)
@@ -663,6 +663,37 @@ gf100_gr_zbc_init(struct gf100_gr_priv *priv)
                gf100_gr_zbc_clear_depth(priv, index);
 }
 
+/**
+ * Wait until GR goes idle. GR is considered idle if it is disabled by the
+ * MC (0x200) register, or GR is not busy and a context switch is not in
+ * progress.
+ */
+int
+gf100_gr_wait_idle(struct gf100_gr_priv *priv)
+{
+       unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000);
+       bool gr_enabled, ctxsw_active, gr_busy;
+
+       do {
+               /*
+                * required to make sure FIFO_ENGINE_STATUS (0x2640) is
+                * up-to-date
+                */
+               nv_rd32(priv, 0x400700);
+
+               gr_enabled = nv_rd32(priv, 0x200) & 0x1000;
+               ctxsw_active = nv_rd32(priv, 0x2640) & 0x8000;
+               gr_busy = nv_rd32(priv, 0x40060c) & 0x1;
+
+               if (!gr_enabled || (!gr_busy && !ctxsw_active))
+                       return 0;
+       } while (time_before(jiffies, end_jiffies));
+
+       nv_error(priv, "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
+                gr_enabled, ctxsw_active, gr_busy);
+       return -EAGAIN;
+}
+
 void
 gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
 {
@@ -699,7 +730,13 @@ gf100_gr_icmd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
 
                while (addr < next) {
                        nv_wr32(priv, 0x400200, addr);
-                       nv_wait(priv, 0x400700, 0x00000002, 0x00000000);
+                       /*
+                        * Wait for GR to go idle after submitting a
+                        * GO_IDLE bundle
+                        */
+                       if ((addr & 0xffff) == 0xe100)
+                               gf100_gr_wait_idle(priv);
+                       nv_wait(priv, 0x400700, 0x00000004, 0x00000000);
                        addr += init->pitch;
                }
        }
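
The bare read of 0x400700 before sampling the status registers is a posting-read idiom: as the comment notes, it is required so FIFO_ENGINE_STATUS (0x2640) is up to date. Reduced to its essentials (offsets kept from the hunk, helper hypothetical):

    #include <linux/io.h>
    #include <linux/types.h>

    /* Sketch: posting read before a status sample. */
    static bool example_engine_idle(void __iomem *regs)
    {
            /* flush posted writes so the status reads below are current */
            (void)readl(regs + 0x400700);

            return !(readl(regs + 0x40060c) & 0x1);
    }
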
index 8af1a89eda84d13436b0345301a3e767cc710c19..c9533fdac4fc85a2b0189429d6564cbc119734a7 100644 (file)
@@ -181,6 +181,7 @@ struct gf100_gr_oclass {
        int ppc_nr;
 };
 
+int  gf100_gr_wait_idle(struct gf100_gr_priv *);
 void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *);
 void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
 void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
index 2006c445938d9493773ef2dbf9bb07d045a67b96..4cf36a3aa81460c2c062b6cc41e70571530c1eb8 100644 (file)
@@ -332,9 +332,12 @@ static void
 nvkm_perfctx_dtor(struct nvkm_object *object)
 {
        struct nvkm_pm *ppm = (void *)object->engine;
+       struct nvkm_perfctx *ctx = (void *)object;
+
        mutex_lock(&nv_subdev(ppm)->mutex);
-       nvkm_engctx_destroy(&ppm->context->base);
-       ppm->context = NULL;
+       nvkm_engctx_destroy(&ctx->base);
+       if (ppm->context == ctx)
+               ppm->context = NULL;
        mutex_unlock(&nv_subdev(ppm)->mutex);
 }
 
@@ -355,12 +358,11 @@ nvkm_perfctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        mutex_lock(&nv_subdev(ppm)->mutex);
        if (ppm->context == NULL)
                ppm->context = ctx;
-       mutex_unlock(&nv_subdev(ppm)->mutex);
-
        if (ctx != ppm->context)
-               return -EBUSY;
+               ret = -EBUSY;
+       mutex_unlock(&nv_subdev(ppm)->mutex);
 
-       return 0;
+       return ret;
 }
 
 struct nvkm_oclass
index f67cdae1e90a599a04c46dd7a654102a7731bdf0..f4611e3f097187002e68dc54af44c45bfd0006d9 100644 (file)
@@ -1284,6 +1284,44 @@ init_zm_reg_sequence(struct nvbios_init *init)
        }
 }
 
+/**
+ * INIT_PLL_INDIRECT - opcode 0x59
+ *
+ */
+static void
+init_pll_indirect(struct nvbios_init *init)
+{
+       struct nvkm_bios *bios = init->bios;
+       u32  reg = nv_ro32(bios, init->offset + 1);
+       u16 addr = nv_ro16(bios, init->offset + 5);
+       u32 freq = (u32)nv_ro16(bios, addr) * 1000;
+
+       trace("PLL_INDIRECT\tR[0x%06x] =PLL= VBIOS[%04x] = %dkHz\n",
+             reg, addr, freq);
+       init->offset += 7;
+
+       init_prog_pll(init, reg, freq);
+}
+
+/**
+ * INIT_ZM_REG_INDIRECT - opcode 0x5a
+ *
+ */
+static void
+init_zm_reg_indirect(struct nvbios_init *init)
+{
+       struct nvkm_bios *bios = init->bios;
+       u32  reg = nv_ro32(bios, init->offset + 1);
+       u16 addr = nv_ro16(bios, init->offset + 5);
+       u32 data = nv_ro32(bios, addr);
+
+       trace("ZM_REG_INDIRECT\tR[0x%06x] = VBIOS[0x%04x] = 0x%08x\n",
+             reg, addr, data);
+       init->offset += 7;
+
+       init_wr32(init, reg, data);
+}
+
 /**
  * INIT_SUB_DIRECT - opcode 0x5b
  *
@@ -2145,6 +2183,8 @@ static struct nvbios_init_opcode {
        [0x56] = { init_condition_time },
        [0x57] = { init_ltime },
        [0x58] = { init_zm_reg_sequence },
+       [0x59] = { init_pll_indirect },
+       [0x5a] = { init_zm_reg_indirect },
        [0x5b] = { init_sub_direct },
        [0x5c] = { init_jump },
        [0x5e] = { init_i2c_if },
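
The two new opcodes plug into a designated-initializer dispatch table, which is the parser's entire control flow: index by opcode byte, call the handler, let it advance the cursor by its own length. A self-contained sketch of that structure with hypothetical names (the real 0x59/0x5a handlers likewise advance by 7 bytes):

    #include <linux/types.h>

    struct example_parser {
            const u8 *data;
            u32 len;
            u32 offset;
    };

    typedef void (*example_op_fn)(struct example_parser *p);

    static void example_op_skip7(struct example_parser *p)
    {
            p->offset += 7;         /* opcode byte + 6 bytes of payload */
    }

    static const example_op_fn example_ops[256] = {
            [0x59] = example_op_skip7,      /* stand-ins for real handlers */
            [0x5a] = example_op_skip7,
    };

    static void example_run(struct example_parser *p)
    {
            while (p->offset < p->len) {
                    u8 op = p->data[p->offset];

                    if (!example_ops[op])
                            break;          /* unknown opcode: stop parsing */
                    example_ops[op](p);
            }
    }
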
index 822d32a28d6e15a38f8c9db722288c27c9349d41..065e9f5c8db98a0a2b70b5b2d4a203d350899465 100644 (file)
@@ -180,7 +180,8 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
               struct gt215_clk_info *info)
 {
        struct gt215_clk_priv *priv = (void *)clock;
-       u32 oclk, sclk, sdiv, diff;
+       u32 oclk, sclk, sdiv;
+       s32 diff;
 
        info->clk = 0;
 
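
Switching diff from u32 to s32 matters because an unsigned subtraction can never go negative: a would-be negative delta wraps to a huge positive value and corrupts any closest-frequency comparison. A minimal illustration:

    #include <linux/types.h>

    /* Sketch: why the delta must be signed before it is compared. */
    static s32 example_clk_delta(u32 khz, u32 sclk)
    {
            /*
             * As u32, khz - sclk wraps for sclk > khz (1000 - 1500 becomes
             * 4294966796); cast to s32 it is the expected -500.
             */
            return (s32)(khz - sclk);
    }
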
index c0fdb89e74ac4a41944e2d7c2f41ca3daa6c4ed6..24dcdfb58a8d852039317a5017ee34dcc9cf3039 100644 (file)
@@ -38,6 +38,14 @@ gk20a_ibus_init_priv_ring(struct gk20a_ibus_priv *priv)
        nv_wr32(priv, 0x12004c, 0x4);
        nv_wr32(priv, 0x122204, 0x2);
        nv_rd32(priv, 0x122204);
+
+       /*
+        * Bug: increase clock timeout to avoid operation failure at high
+        * gpcclk rate.
+        */
+       nv_wr32(priv, 0x122354, 0x800);
+       nv_wr32(priv, 0x128328, 0x800);
+       nv_wr32(priv, 0x124320, 0x800);
 }
 
 static void
index 80614f1b207474ae407251d6811add96925e8241..282143f49d72e60ba5c66f79fc818ca3b37ea942 100644 (file)
@@ -50,7 +50,12 @@ nv04_instobj_dtor(struct nvkm_object *object)
 {
        struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
        struct nv04_instobj_priv *node = (void *)object;
+       struct nvkm_subdev *subdev = (void *)priv;
+
+       mutex_lock(&subdev->mutex);
        nvkm_mm_free(&priv->heap, &node->mem);
+       mutex_unlock(&subdev->mutex);
+
        nvkm_instobj_destroy(&node->base);
 }
 
@@ -62,6 +67,7 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent);
        struct nv04_instobj_priv *node;
        struct nvkm_instobj_args *args = data;
+       struct nvkm_subdev *subdev = (void *)priv;
        int ret;
 
        if (!args->align)
@@ -72,8 +78,10 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        if (ret)
                return ret;
 
+       mutex_lock(&subdev->mutex);
        ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
                           args->align, &node->mem);
+       mutex_unlock(&subdev->mutex);
        if (ret)
                return ret;
 
index dd39f434b4a7eccf1446ff5cd1857a2cf5de972d..c3872598b85a3856787b1bf0b7113633a468e020 100644 (file)
@@ -2299,8 +2299,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
        encoder_mode = atombios_get_encoder_mode(encoder);
        if (connector && (radeon_audio != 0) &&
            ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
-            (ENCODER_MODE_IS_DP(encoder_mode) &&
-             drm_detect_monitor_audio(radeon_connector_edid(connector)))))
+            ENCODER_MODE_IS_DP(encoder_mode)))
                radeon_audio_mode_set(encoder, adjusted_mode);
 }
 
index 8730562323a8b77d0dd86ec399207806a6807ddc..4a09947be24457fed430abeaf0fa308938574413 100644 (file)
@@ -5818,7 +5818,7 @@ int ci_dpm_init(struct radeon_device *rdev)
                        tmp |= DPM_ENABLED;
                        break;
                default:
-                       DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
+                       DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
                        break;
                }
                WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
index 68fd9fc677e35f1ca161295694301ec21c089e4d..44480c1b9738cc2625cee481ccf02aa20ed6bb1f 100644 (file)
@@ -93,30 +93,26 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
        struct radeon_device *rdev = encoder->dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       u32 offset;
 
-       if (!dig || !dig->afmt || !dig->afmt->pin)
+       if (!dig || !dig->afmt || !dig->pin)
                return;
 
-       offset = dig->afmt->offset;
-
-       WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
-              AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
+       WREG32(AFMT_AUDIO_SRC_CONTROL +  dig->afmt->offset,
+              AFMT_AUDIO_SRC_SELECT(dig->pin->id));
 }
 
 void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
-               struct drm_connector *connector, struct drm_display_mode *mode)
+                                   struct drm_connector *connector,
+                                   struct drm_display_mode *mode)
 {
        struct radeon_device *rdev = encoder->dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       u32 tmp = 0, offset;
+       u32 tmp = 0;
 
-       if (!dig || !dig->afmt || !dig->afmt->pin)
+       if (!dig || !dig->afmt || !dig->pin)
                return;
 
-       offset = dig->afmt->pin->offset;
-
        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                if (connector->latency_present[1])
                        tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
@@ -130,24 +126,24 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
                else
                        tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
        }
-       WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
+       WREG32_ENDPOINT(dig->pin->offset,
+                       AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
 }
 
 void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
-       u8 *sadb, int sad_count)
+                                            u8 *sadb, int sad_count)
 {
        struct radeon_device *rdev = encoder->dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       u32 offset, tmp;
+       u32 tmp;
 
-       if (!dig || !dig->afmt || !dig->afmt->pin)
+       if (!dig || !dig->afmt || !dig->pin)
                return;
 
-       offset = dig->afmt->pin->offset;
-
        /* program the speaker allocation */
-       tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+       tmp = RREG32_ENDPOINT(dig->pin->offset,
+                             AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
        tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
        /* set HDMI mode */
        tmp |= HDMI_CONNECTION;
@@ -155,24 +151,24 @@ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
                tmp |= SPEAKER_ALLOCATION(sadb[0]);
        else
                tmp |= SPEAKER_ALLOCATION(5); /* stereo */
-       WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+       WREG32_ENDPOINT(dig->pin->offset,
+                       AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
 }
 
 void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
-       u8 *sadb, int sad_count)
+                                          u8 *sadb, int sad_count)
 {
        struct radeon_device *rdev = encoder->dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       u32 offset, tmp;
+       u32 tmp;
 
-       if (!dig || !dig->afmt || !dig->afmt->pin)
+       if (!dig || !dig->afmt || !dig->pin)
                return;
 
-       offset = dig->afmt->pin->offset;
-
        /* program the speaker allocation */
-       tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+       tmp = RREG32_ENDPOINT(dig->pin->offset,
+                             AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
        tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
        /* set DP mode */
        tmp |= DP_CONNECTION;
@@ -180,13 +176,13 @@ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
                tmp |= SPEAKER_ALLOCATION(sadb[0]);
        else
                tmp |= SPEAKER_ALLOCATION(5); /* stereo */
-       WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+       WREG32_ENDPOINT(dig->pin->offset,
+                       AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
 }
 
 void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
-       struct cea_sad *sads, int sad_count)
+                             struct cea_sad *sads, int sad_count)
 {
-       u32 offset;
        int i;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -206,11 +202,9 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
                { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
        };
 
-       if (!dig || !dig->afmt || !dig->afmt->pin)
+       if (!dig || !dig->afmt || !dig->pin)
                return;
 
-       offset = dig->afmt->pin->offset;
-
        for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
                u32 value = 0;
                u8 stereo_freqs = 0;
@@ -237,7 +231,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
 
                value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
 
-               WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
+               WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value);
        }
 }
 
@@ -253,7 +247,7 @@ void dce6_audio_enable(struct radeon_device *rdev,
 }
 
 void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
-       struct radeon_crtc *crtc, unsigned int clock)
+                            struct radeon_crtc *crtc, unsigned int clock)
 {
        /* Two dtos; generally use dto0 for HDMI */
        u32 value = 0;
@@ -272,7 +266,7 @@ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
 }
 
 void dce6_dp_audio_set_dto(struct radeon_device *rdev,
-       struct radeon_crtc *crtc, unsigned int clock)
+                          struct radeon_crtc *crtc, unsigned int clock)
 {
        /* Two dtos; generally use dto1 for DP */
        u32 value = 0;
index fa719c53449bcd90e009e1b59d1b3e1ed5bff6f3..fbc8d88d6e5de1afe43c11884340e90bfa8e57a2 100644 (file)
@@ -245,6 +245,28 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
 static void radeon_audio_enable(struct radeon_device *rdev,
                                struct r600_audio_pin *pin, u8 enable_mask)
 {
+       struct drm_encoder *encoder;
+       struct radeon_encoder *radeon_encoder;
+       struct radeon_encoder_atom_dig *dig;
+       int pin_count = 0;
+
+       if (!pin)
+               return;
+
+       if (rdev->mode_info.mode_config_initialized) {
+               list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
+                       if (radeon_encoder_is_digital(encoder)) {
+                               radeon_encoder = to_radeon_encoder(encoder);
+                               dig = radeon_encoder->enc_priv;
+                               if (dig->pin == pin)
+                                       pin_count++;
+                       }
+               }
+
+               if ((pin_count > 1) && (enable_mask == 0))
+                       return;
+       }
+
        if (rdev->audio.funcs->enable)
                rdev->audio.funcs->enable(rdev, pin, enable_mask);
 }
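
The new guard counts how many digital encoders still reference the pin and bails out of a disable while it is shared, plain reference counting over the encoder list. Distilled, with hypothetical types:

    #include <linux/list.h>
    #include <linux/types.h>

    struct example_encoder {
            struct list_head head;
            const void *pin;        /* audio pin in use, or NULL */
    };

    /* Sketch: only allow powering the pin down once no one else uses it. */
    static bool example_may_disable(struct list_head *encoders,
                                    const void *pin)
    {
            struct example_encoder *enc;
            int users = 0;

            list_for_each_entry(enc, encoders, head)
                    if (enc->pin == pin)
                            users++;

            return users <= 1;
    }
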
@@ -336,24 +358,13 @@ void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset,
 
 static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
 {
-       struct radeon_encoder *radeon_encoder;
-       struct drm_connector *connector;
-       struct radeon_connector *radeon_connector = NULL;
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct cea_sad *sads;
        int sad_count;
 
-       list_for_each_entry(connector,
-               &encoder->dev->mode_config.connector_list, head) {
-               if (connector->encoder == encoder) {
-                       radeon_connector = to_radeon_connector(connector);
-                       break;
-               }
-       }
-
-       if (!radeon_connector) {
-               DRM_ERROR("Couldn't find encoder's connector\n");
+       if (!connector)
                return;
-       }
 
        sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
        if (sad_count <= 0) {
@@ -362,8 +373,6 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
        }
        BUG_ON(!sads);
 
-       radeon_encoder = to_radeon_encoder(encoder);
-
        if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
                radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count);
 
@@ -372,27 +381,16 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
 
 static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
 {
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-       struct drm_connector *connector;
-       struct radeon_connector *radeon_connector = NULL;
        u8 *sadb = NULL;
        int sad_count;
 
-       list_for_each_entry(connector,
-                           &encoder->dev->mode_config.connector_list, head) {
-               if (connector->encoder == encoder) {
-                       radeon_connector = to_radeon_connector(connector);
-                       break;
-               }
-       }
-
-       if (!radeon_connector) {
-               DRM_ERROR("Couldn't find encoder's connector\n");
+       if (!connector)
                return;
-       }
 
-       sad_count = drm_edid_to_speaker_allocation(
-               radeon_connector_edid(connector), &sadb);
+       sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector),
+                                                  &sadb);
        if (sad_count < 0) {
                DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n",
                          sad_count);
@@ -406,26 +404,13 @@ static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
 }
 
 static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
-       struct drm_display_mode *mode)
+                                             struct drm_display_mode *mode)
 {
-       struct radeon_encoder *radeon_encoder;
-       struct drm_connector *connector;
-       struct radeon_connector *radeon_connector = 0;
-
-       list_for_each_entry(connector,
-               &encoder->dev->mode_config.connector_list, head) {
-               if (connector->encoder == encoder) {
-                       radeon_connector = to_radeon_connector(connector);
-                       break;
-               }
-       }
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 
-       if (!radeon_connector) {
-               DRM_ERROR("Couldn't find encoder's connector\n");
+       if (!connector)
                return;
-       }
-
-       radeon_encoder = to_radeon_encoder(encoder);
 
        if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields)
                radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
@@ -451,29 +436,23 @@ static void radeon_audio_select_pin(struct drm_encoder *encoder)
 }
 
 void radeon_audio_detect(struct drm_connector *connector,
+                        struct drm_encoder *encoder,
                         enum drm_connector_status status)
 {
-       struct radeon_device *rdev;
-       struct radeon_encoder *radeon_encoder;
+       struct drm_device *dev = connector->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig;
 
-       if (!connector || !connector->encoder)
+       if (!radeon_audio_chipset_supported(rdev))
                return;
 
-       rdev = connector->encoder->dev->dev_private;
-
-       if (!radeon_audio_chipset_supported(rdev))
+       if (!radeon_encoder_is_digital(encoder))
                return;
 
-       radeon_encoder = to_radeon_encoder(connector->encoder);
        dig = radeon_encoder->enc_priv;
 
        if (status == connector_status_connected) {
-               if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
-                       radeon_encoder->audio = NULL;
-                       return;
-               }
-
                if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
                        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
@@ -486,11 +465,17 @@ void radeon_audio_detect(struct drm_connector *connector,
                        radeon_encoder->audio = rdev->audio.hdmi_funcs;
                }
 
-               dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
-               radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+               if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+                       if (!dig->pin)
+                               dig->pin = radeon_audio_get_pin(encoder);
+                       radeon_audio_enable(rdev, dig->pin, 0xf);
+               } else {
+                       radeon_audio_enable(rdev, dig->pin, 0);
+                       dig->pin = NULL;
+               }
        } else {
-               radeon_audio_enable(rdev, dig->afmt->pin, 0);
-               dig->afmt->pin = NULL;
+               radeon_audio_enable(rdev, dig->pin, 0);
+               dig->pin = NULL;
        }
 }
 
@@ -518,29 +503,18 @@ static void radeon_audio_set_dto(struct drm_encoder *encoder, unsigned int clock
 }
 
 static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
-       struct drm_display_mode *mode)
+                                      struct drm_display_mode *mode)
 {
        struct radeon_device *rdev = encoder->dev->dev_private;
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-       struct drm_connector *connector;
-       struct radeon_connector *radeon_connector = NULL;
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
        u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
        struct hdmi_avi_infoframe frame;
        int err;
 
-       list_for_each_entry(connector,
-               &encoder->dev->mode_config.connector_list, head) {
-               if (connector->encoder == encoder) {
-                       radeon_connector = to_radeon_connector(connector);
-                       break;
-               }
-       }
-
-       if (!radeon_connector) {
-               DRM_ERROR("Couldn't find encoder's connector\n");
-               return -ENOENT;
-       }
+       if (!connector)
+               return -EINVAL;
 
        err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
        if (err < 0) {
@@ -563,8 +537,8 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
                return err;
        }
 
-       if (dig && dig->afmt &&
-               radeon_encoder->audio && radeon_encoder->audio->set_avi_packet)
+       if (dig && dig->afmt && radeon_encoder->audio &&
+           radeon_encoder->audio->set_avi_packet)
                radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset,
                        buffer, sizeof(buffer));
 
@@ -722,30 +696,41 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
 {
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
        if (!dig || !dig->afmt)
                return;
 
-       radeon_audio_set_mute(encoder, true);
+       if (!connector)
+               return;
 
-       radeon_audio_write_speaker_allocation(encoder);
-       radeon_audio_write_sad_regs(encoder);
-       radeon_audio_write_latency_fields(encoder, mode);
-       radeon_audio_set_dto(encoder, mode->clock);
-       radeon_audio_set_vbi_packet(encoder);
-       radeon_hdmi_set_color_depth(encoder);
-       radeon_audio_update_acr(encoder, mode->clock);
-       radeon_audio_set_audio_packet(encoder);
-       radeon_audio_select_pin(encoder);
+       if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+               radeon_audio_set_mute(encoder, true);
 
-       if (radeon_audio_set_avi_packet(encoder, mode) < 0)
-               return;
+               radeon_audio_write_speaker_allocation(encoder);
+               radeon_audio_write_sad_regs(encoder);
+               radeon_audio_write_latency_fields(encoder, mode);
+               radeon_audio_set_dto(encoder, mode->clock);
+               radeon_audio_set_vbi_packet(encoder);
+               radeon_hdmi_set_color_depth(encoder);
+               radeon_audio_update_acr(encoder, mode->clock);
+               radeon_audio_set_audio_packet(encoder);
+               radeon_audio_select_pin(encoder);
+
+               if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+                       return;
 
-       radeon_audio_set_mute(encoder, false);
+               radeon_audio_set_mute(encoder, false);
+       } else {
+               radeon_hdmi_set_color_depth(encoder);
+
+               if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+                       return;
+       }
 }
 
 static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
-       struct drm_display_mode *mode)
+                                    struct drm_display_mode *mode)
 {
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
@@ -759,22 +744,27 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
        if (!dig || !dig->afmt)
                return;
 
-       radeon_audio_write_speaker_allocation(encoder);
-       radeon_audio_write_sad_regs(encoder);
-       radeon_audio_write_latency_fields(encoder, mode);
-       if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
-               radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
-       else
-               radeon_audio_set_dto(encoder, dig_connector->dp_clock);
-       radeon_audio_set_audio_packet(encoder);
-       radeon_audio_select_pin(encoder);
-
-       if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+       if (!connector)
                return;
+
+       if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+               radeon_audio_write_speaker_allocation(encoder);
+               radeon_audio_write_sad_regs(encoder);
+               radeon_audio_write_latency_fields(encoder, mode);
+               if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
+                       radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
+               else
+                       radeon_audio_set_dto(encoder, dig_connector->dp_clock);
+               radeon_audio_set_audio_packet(encoder);
+               radeon_audio_select_pin(encoder);
+
+               if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+                       return;
+       }
 }
 
 void radeon_audio_mode_set(struct drm_encoder *encoder,
-       struct drm_display_mode *mode)
+                          struct drm_display_mode *mode)
 {
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 
index 8438304f7139549c9ab785ec3f1decd50ac3254a..059cc3012062a7b50708620c557771dcfea821c0 100644 (file)
@@ -68,7 +68,8 @@ struct radeon_audio_funcs
 
 int radeon_audio_init(struct radeon_device *rdev);
 void radeon_audio_detect(struct drm_connector *connector,
-       enum drm_connector_status status);
+                        struct drm_encoder *encoder,
+                        enum drm_connector_status status);
 u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev,
        u32 offset, u32 reg);
 void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
index 3e5f6b71f3adad72b0aae490ede2ae1f426c9ff5..c097d3a82bda734888ca46d05c14794738163642 100644 (file)
@@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
 
                        if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
                            (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
+                               u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+
+                               if (hss > lvds->native_mode.hdisplay)
+                                       hss = (10 - 1) * 8;
+
                                lvds->native_mode.htotal = lvds->native_mode.hdisplay +
                                        (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
                                lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
-                                       (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
+                                       hss;
                                lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
                                        (RBIOS8(tmp + 23) * 8);
 
index cebb65e07e1d13f0bee01ee753f4c8a76e0e22d5..94b21ae70ef725781c8dc7d0ec8a30fbd48f0e0e 100644 (file)
@@ -1379,8 +1379,16 @@ out:
        /* updated in get modes as well since we need to know if it's analog or digital */
        radeon_connector_update_scratch_regs(connector, ret);
 
-       if (radeon_audio != 0)
-               radeon_audio_detect(connector, ret);
+       if ((radeon_audio != 0) && radeon_connector->use_digital) {
+               const struct drm_connector_helper_funcs *connector_funcs =
+                       connector->helper_private;
+
+               encoder = connector_funcs->best_encoder(connector);
+               if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) {
+                       radeon_connector_get_edid(connector);
+                       radeon_audio_detect(connector, encoder, ret);
+               }
+       }
 
 exit:
        pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1717,8 +1725,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
 
        radeon_connector_update_scratch_regs(connector, ret);
 
-       if (radeon_audio != 0)
-               radeon_audio_detect(connector, ret);
+       if ((radeon_audio != 0) && encoder) {
+               radeon_connector_get_edid(connector);
+               radeon_audio_detect(connector, encoder, ret);
+       }
 
 out:
        pm_runtime_mark_last_busy(connector->dev->dev);
index 5450fa95a47efdcde9aa664c740cbe578e4f5b26..c4777c8d0312a047790bcd309ac00fba2c8a02a4 100644 (file)
@@ -260,8 +260,10 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
                        }
                }
        }
-       mb();
-       radeon_gart_tlb_flush(rdev);
+       if (rdev->gart.ptr) {
+               mb();
+               radeon_gart_tlb_flush(rdev);
+       }
 }
 
 /**
@@ -306,8 +308,10 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
                        page_base += RADEON_GPU_PAGE_SIZE;
                }
        }
-       mb();
-       radeon_gart_tlb_flush(rdev);
+       if (rdev->gart.ptr) {
+               mb();
+               radeon_gart_tlb_flush(rdev);
+       }
        return 0;
 }
 
index 013ec7106e555a2862c4d8b7194e25eaaabc7572..3dcc5733ff6915b2e2497ca3d4ff800455f49c20 100644 (file)
@@ -36,6 +36,7 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
        if (robj) {
                if (robj->gem_base.import_attach)
                        drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
+               radeon_mn_unregister(robj);
                radeon_bo_unref(&robj);
        }
 }
index 07909d817381a21a18fdeb5dd7cd8b4be92fbc8e..aecc3e3dec0ca093441e3871df414627b51e92ec 100644 (file)
@@ -237,7 +237,6 @@ struct radeon_afmt {
        int offset;
        bool last_buffer_filled_status;
        int id;
-       struct r600_audio_pin *pin;
 };
 
 struct radeon_mode_info {
@@ -439,6 +438,7 @@ struct radeon_encoder_atom_dig {
        uint8_t backlight_level;
        int panel_mode;
        struct radeon_afmt *afmt;
+       struct r600_audio_pin *pin;
        int active_mst_links;
 };
 
index 318165d4855c4bf3aa9e4a23bddc38cc25481968..676362769b8dbfc9dea20e8c76c02e4208950b29 100644 (file)
@@ -75,7 +75,6 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
        bo = container_of(tbo, struct radeon_bo, tbo);
 
        radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
-       radeon_mn_unregister(bo);
 
        mutex_lock(&bo->rdev->gem.mutex);
        list_del_init(&bo->list);
index 1dbdf3230daed2b4c829e3ed5b9800a007691fea..787cd8fd897faf52e5874cb0183a07bc36594fdc 100644 (file)
@@ -2926,6 +2926,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
        /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
        { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
        { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+       { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
        { 0, 0, 0, 0 },
 };
 
index 01b558fe369539f447d36493f3bcd7e1bc4f3ded..9a0c2911272a9062a65f10096928e73edd35dd4f 100644 (file)
@@ -555,7 +555,6 @@ static struct platform_driver rockchip_drm_platform_driver = {
        .probe = rockchip_drm_platform_probe,
        .remove = rockchip_drm_platform_remove,
        .driver = {
-               .owner = THIS_MODULE,
                .name = "rockchip-drm",
                .of_match_table = rockchip_drm_dt_ids,
                .pm = &rockchip_drm_pm_ops,
index 77d52893d40f6086082887d22c3530a7de6b9048..002645bb5bbf9b83f30c0f383050c817d8e43dbc 100644 (file)
@@ -162,7 +162,8 @@ static void rockchip_drm_output_poll_changed(struct drm_device *dev)
        struct rockchip_drm_private *private = dev->dev_private;
        struct drm_fb_helper *fb_helper = &private->fbdev_helper;
 
-       drm_fb_helper_hotplug_event(fb_helper);
+       if (fb_helper)
+               drm_fb_helper_hotplug_event(fb_helper);
 }
 
 static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
index eb2282cc4a56507819a5ab638af504499157157a..eba5f8a52fbd9ff2bf7941687f54e11320cb10b3 100644 (file)
@@ -54,55 +54,56 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
                       &rk_obj->dma_attrs);
 }
 
-int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
-                         struct vm_area_struct *vma)
+static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
+                                       struct vm_area_struct *vma)
 {
+       int ret;
        struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
        struct drm_device *drm = obj->dev;
-       unsigned long vm_size;
 
-       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-       vm_size = vma->vm_end - vma->vm_start;
-
-       if (vm_size > obj->size)
-               return -EINVAL;
+       /*
+        * dma_alloc_attrs() backed rk_obj with struct pages, so clear the
+        * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
+        */
+       vma->vm_flags &= ~VM_PFNMAP;
 
-       return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
+       ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
                             obj->size, &rk_obj->dma_attrs);
+       if (ret)
+               drm_gem_vm_close(vma);
+
+       return ret;
 }
 
-/* drm driver mmap file operations */
-int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
+                         struct vm_area_struct *vma)
 {
-       struct drm_file *priv = filp->private_data;
-       struct drm_device *dev = priv->minor->dev;
-       struct drm_gem_object *obj;
-       struct drm_vma_offset_node *node;
+       struct drm_device *drm = obj->dev;
        int ret;
 
-       if (drm_device_is_unplugged(dev))
-               return -ENODEV;
+       mutex_lock(&drm->struct_mutex);
+       ret = drm_gem_mmap_obj(obj, obj->size, vma);
+       mutex_unlock(&drm->struct_mutex);
+       if (ret)
+               return ret;
 
-       mutex_lock(&dev->struct_mutex);
+       return rockchip_drm_gem_object_mmap(obj, vma);
+}
 
-       node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
-                                          vma->vm_pgoff,
-                                          vma_pages(vma));
-       if (!node) {
-               mutex_unlock(&dev->struct_mutex);
-               DRM_ERROR("failed to find vma node.\n");
-               return -EINVAL;
-       } else if (!drm_vma_node_is_allowed(node, filp)) {
-               mutex_unlock(&dev->struct_mutex);
-               return -EACCES;
-       }
+/* drm driver mmap file operations */
+int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       struct drm_gem_object *obj;
+       int ret;
 
-       obj = container_of(node, struct drm_gem_object, vma_node);
-       ret = rockchip_gem_mmap_buf(obj, vma);
+       ret = drm_gem_mmap(filp, vma);
+       if (ret)
+               return ret;
 
-       mutex_unlock(&dev->struct_mutex);
+       obj = vma->vm_private_data;
 
-       return ret;
+       return rockchip_drm_gem_object_mmap(obj, vma);
 }
 
 struct rockchip_gem_object *
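
The rework drops the hand-rolled VMA offset lookup and lets drm_gem_mmap() do validation and setup, keeping only the device-specific remap. A sketch of the resulting flow, assuming the 2015-era DRM helpers used in the hunk:

    #include <drm/drmP.h>
    #include <drm/drm_gem.h>

    /* Sketch: core mmap first, then the driver-specific remapping step. */
    static int example_gem_mmap(struct file *filp, struct vm_area_struct *vma)
    {
            struct drm_gem_object *obj;
            int ret;

            ret = drm_gem_mmap(filp, vma);  /* offset lookup, VMA setup */
            if (ret)
                    return ret;

            obj = vma->vm_private_data;     /* set by drm_gem_mmap_obj() */

            /*
             * The core assumes PFN mappings; a driver whose buffers are
             * struct-page backed clears VM_PFNMAP before remapping, as the
             * hunk above does before calling dma_mmap_attrs().
             */
            vma->vm_flags &= ~VM_PFNMAP;

            (void)obj;      /* the real dma_mmap_attrs() call is elided */
            return 0;
    }
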
index dc65161d7cad20acb2f079bdb6b229cf779b3613..34b78e73653248316fcd24732d03d253454c6e50 100644 (file)
@@ -170,6 +170,7 @@ struct vop_win_phy {
 
        struct vop_reg enable;
        struct vop_reg format;
+       struct vop_reg rb_swap;
        struct vop_reg act_info;
        struct vop_reg dsp_info;
        struct vop_reg dsp_st;
@@ -199,8 +200,12 @@ struct vop_data {
 static const uint32_t formats_01[] = {
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_ABGR8888,
        DRM_FORMAT_RGB888,
+       DRM_FORMAT_BGR888,
        DRM_FORMAT_RGB565,
+       DRM_FORMAT_BGR565,
        DRM_FORMAT_NV12,
        DRM_FORMAT_NV16,
        DRM_FORMAT_NV24,
@@ -209,8 +214,12 @@ static const uint32_t formats_01[] = {
 static const uint32_t formats_234[] = {
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_ABGR8888,
        DRM_FORMAT_RGB888,
+       DRM_FORMAT_BGR888,
        DRM_FORMAT_RGB565,
+       DRM_FORMAT_BGR565,
 };
 
 static const struct vop_win_phy win01_data = {
@@ -218,6 +227,7 @@ static const struct vop_win_phy win01_data = {
        .nformats = ARRAY_SIZE(formats_01),
        .enable = VOP_REG(WIN0_CTRL0, 0x1, 0),
        .format = VOP_REG(WIN0_CTRL0, 0x7, 1),
+       .rb_swap = VOP_REG(WIN0_CTRL0, 0x1, 12),
        .act_info = VOP_REG(WIN0_ACT_INFO, 0x1fff1fff, 0),
        .dsp_info = VOP_REG(WIN0_DSP_INFO, 0x0fff0fff, 0),
        .dsp_st = VOP_REG(WIN0_DSP_ST, 0x1fff1fff, 0),
@@ -234,6 +244,7 @@ static const struct vop_win_phy win23_data = {
        .nformats = ARRAY_SIZE(formats_234),
        .enable = VOP_REG(WIN2_CTRL0, 0x1, 0),
        .format = VOP_REG(WIN2_CTRL0, 0x7, 1),
+       .rb_swap = VOP_REG(WIN2_CTRL0, 0x1, 12),
        .dsp_info = VOP_REG(WIN2_DSP_INFO0, 0x0fff0fff, 0),
        .dsp_st = VOP_REG(WIN2_DSP_ST0, 0x1fff1fff, 0),
        .yrgb_mst = VOP_REG(WIN2_MST0, 0xffffffff, 0),
@@ -242,15 +253,6 @@ static const struct vop_win_phy win23_data = {
        .dst_alpha_ctl = VOP_REG(WIN2_DST_ALPHA_CTRL, 0xff, 0),
 };
 
-static const struct vop_win_phy cursor_data = {
-       .data_formats = formats_234,
-       .nformats = ARRAY_SIZE(formats_234),
-       .enable = VOP_REG(HWC_CTRL0, 0x1, 0),
-       .format = VOP_REG(HWC_CTRL0, 0x7, 1),
-       .dsp_st = VOP_REG(HWC_DSP_ST, 0x1fff1fff, 0),
-       .yrgb_mst = VOP_REG(HWC_MST, 0xffffffff, 0),
-};
-
 static const struct vop_ctrl ctrl_data = {
        .standby = VOP_REG(SYS_CTRL, 0x1, 22),
        .gate_en = VOP_REG(SYS_CTRL, 0x1, 23),
@@ -282,14 +284,14 @@ static const struct vop_reg_data vop_init_reg_table[] = {
 /*
  * Note: rk3288 has a dedicated 'cursor' window, however, that window requires
  * special support to get alpha blending working.  For now, just use overlay
- * window 1 for the drm cursor.
+ * window 3 for the drm cursor.
  */
 static const struct vop_win_data rk3288_vop_win_data[] = {
        { .base = 0x00, .phy = &win01_data, .type = DRM_PLANE_TYPE_PRIMARY },
-       { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_CURSOR },
+       { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_OVERLAY },
        { .base = 0x00, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
-       { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
-       { .base = 0x00, .phy = &cursor_data, .type = DRM_PLANE_TYPE_OVERLAY },
+       { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_CURSOR },
 };
 
 static const struct vop_data rk3288_vop = {
@@ -352,15 +354,32 @@ static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
        }
 }
 
+static bool has_rb_swapped(uint32_t format)
+{
+       switch (format) {
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_ABGR8888:
+       case DRM_FORMAT_BGR888:
+       case DRM_FORMAT_BGR565:
+               return true;
+       default:
+               return false;
+       }
+}
+
 static enum vop_data_format vop_convert_format(uint32_t format)
 {
        switch (format) {
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_ABGR8888:
                return VOP_FMT_ARGB8888;
        case DRM_FORMAT_RGB888:
+       case DRM_FORMAT_BGR888:
                return VOP_FMT_RGB888;
        case DRM_FORMAT_RGB565:
+       case DRM_FORMAT_BGR565:
                return VOP_FMT_RGB565;
        case DRM_FORMAT_NV12:
                return VOP_FMT_YUV420SP;
@@ -378,6 +397,7 @@ static bool is_alpha_support(uint32_t format)
 {
        switch (format) {
        case DRM_FORMAT_ARGB8888:
+       case DRM_FORMAT_ABGR8888:
                return true;
        default:
                return false;
@@ -588,6 +608,7 @@ static int vop_update_plane_event(struct drm_plane *plane,
        enum vop_data_format format;
        uint32_t val;
        bool is_alpha;
+       bool rb_swap;
        bool visible;
        int ret;
        struct drm_rect dest = {
@@ -621,6 +642,7 @@ static int vop_update_plane_event(struct drm_plane *plane,
                return 0;
 
        is_alpha = is_alpha_support(fb->pixel_format);
+       rb_swap = has_rb_swapped(fb->pixel_format);
        format = vop_convert_format(fb->pixel_format);
        if (format < 0)
                return format;
@@ -689,6 +711,7 @@ static int vop_update_plane_event(struct drm_plane *plane,
        val = (dsp_sty - 1) << 16;
        val |= (dsp_stx - 1) & 0xffff;
        VOP_WIN_SET(vop, win, dsp_st, val);
+       VOP_WIN_SET(vop, win, rb_swap, rb_swap);
 
        if (is_alpha) {
                VOP_WIN_SET(vop, win, dst_alpha_ctl,
index 882cccdad27249c8e3afa992419fb0fb16bec97b..ac6fe40b99f753b267a5e78647c0bdb2e078c5bd 100644 (file)
@@ -490,7 +490,8 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
        else if (boot_cpu_data.x86 > 3)
                tmp = pgprot_noncached(tmp);
 #endif
-#if defined(__ia64__) || defined(__arm__) || defined(__powerpc__)
+#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
+    defined(__powerpc__)
        if (caching_flags & TTM_PL_FLAG_WC)
                tmp = pgprot_writecombine(tmp);
        else
index 3077f1554099932f6f477841182fb4eb278c60ea..624d941aaad163f6b81ddff616f9fa868f1c28ea 100644 (file)
@@ -963,14 +963,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
        } else {
                pool->npages_free += count;
                list_splice(&ttm_dma->pages_list, &pool->free_list);
-               npages = count;
-               if (pool->npages_free > _manager->options.max_size) {
+               /*
+                * Wait until there are at least NUM_PAGES_TO_ALLOC pages
+                * to free, in order to minimize calls to set_memory_wb().
+                */
+               if (pool->npages_free >= (_manager->options.max_size +
+                                         NUM_PAGES_TO_ALLOC))
                        npages = pool->npages_free - _manager->options.max_size;
-                       /* free at least NUM_PAGES_TO_ALLOC number of pages
-                        * to reduce calls to set_memory_wb */
-                       if (npages < NUM_PAGES_TO_ALLOC)
-                               npages = NUM_PAGES_TO_ALLOC;
-               }
        }
        spin_unlock_irqrestore(&pool->lock, irq_flags);
 
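
The rewritten branch defers freeing until at least NUM_PAGES_TO_ALLOC pages sit above the pool cap, so the expensive set_memory_wb() work happens in batches instead of trickling out page by page. The hysteresis, stripped of pool details (names hypothetical):

    /* Sketch: free in batches once the pool exceeds its cap by one batch. */
    static unsigned int example_pages_to_free(unsigned int npages_free,
                                              unsigned int max_size,
                                              unsigned int batch)
    {
            if (npages_free >= max_size + batch)
                    return npages_free - max_size;  /* at least one batch */
            return 0;       /* below the hysteresis threshold: keep them */
    }
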
index 6d2f39d36e445bda4840f06d7eb1f779081f2c22..00f2058944e55872ba25673744e648ea6bc0a12e 100644 (file)
@@ -1107,6 +1107,9 @@ static int ipu_irq_init(struct ipu_soc *ipu)
                return ret;
        }
 
+       for (i = 0; i < IPU_NUM_IRQS; i += 32)
+               ipu_cm_write(ipu, 0, IPU_INT_CTRL(i / 32));
+
        for (i = 0; i < IPU_NUM_IRQS; i += 32) {
                gc = irq_get_domain_generic_chip(ipu->domain, i);
                gc->reg_base = ipu->cm_reg;
index f822fd2a1adabc4b3e53d86155c2d9e50d3d241e..884d82f9190e214636a539cb2db884a1d175c945 100644 (file)
@@ -546,6 +546,12 @@ static const struct hid_device_id apple_devices[] = {
                .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
                .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
+               .driver_data = APPLE_HAS_FN },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
+               .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
+               .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
                .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
index 157c627750535e8943769e3078de068e4f1c7a47..e6fce23b121adb662656bea26d8a4ace812afc9c 100644 (file)
@@ -1782,6 +1782,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -2463,6 +2466,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
        { }
index 3318de690e00666bf7da60c84189a1fc54a0a544..a2dbbbe0d8d7e81b06ac6d646737413fe4d1d357 100644 (file)
@@ -356,6 +356,8 @@ static int cp2112_read(struct cp2112_device *dev, u8 *data, size_t size)
        struct cp2112_force_read_report report;
        int ret;
 
+       if (size > sizeof(dev->read_data))
+               size = sizeof(dev->read_data);
        report.report = CP2112_DATA_READ_FORCE_SEND;
        report.length = cpu_to_be16(size);
 
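
The added clamp bounds the requested transfer to the bounce buffer before the force-read report is built, the standard guard against a caller-controlled length overrunning fixed storage. In miniature:

    #include <linux/string.h>
    #include <linux/types.h>

    /* Sketch: never let a caller-supplied length outrun the buffer. */
    static size_t example_bounded_read(u8 *dst, size_t dst_size,
                                       const u8 *src, size_t requested)
    {
            if (requested > dst_size)
                    requested = dst_size;   /* clamp rather than fail */
            memcpy(dst, src, requested);
            return requested;               /* report what was copied */
    }
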
index b04b0820d816323a01d147c702503b0797734ea4..b3b225b75d0ab7e497c389d0c9caa777da1f6948 100644 (file)
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI   0x0290
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO    0x0291
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS    0x0292
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI   0x0272
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO    0x0273
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS    0x0274
 #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY   0x030a
 #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY    0x030b
 #define USB_DEVICE_ID_APPLE_IRCONTROL  0x8240
index 6a9b05b328a9d40e06b09ed8f8c6e6010669374b..7c811252c1cefebb2418944a7f391117a8a92b6d 100644 (file)
@@ -778,9 +778,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
        /*
         * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN"
         * for the stylus.
+        * The check for mt_report_id ensures we don't process
+        * HID_DG_CONTACTCOUNT from the pen report: that usage sits outside
+        * the physical collection, so only its report ID distinguishes it
+        * from the touch report.
         */
        if (field->physical == HID_DG_STYLUS)
                return 0;
+       else if ((field->physical == 0) &&
+                (field->report->id != td->mt_report_id) &&
+                (td->mt_report_id != -1))
+               return 0;
 
        if (field->application == HID_DG_TOUCHSCREEN ||
            field->application == HID_DG_TOUCHPAD)
index 53e7de7cb9e25e6861f1acb5e3438e590b2e70ad..20f9a653444c21d0ef89f4561d4d68544dff4bf0 100644 (file)
@@ -87,6 +87,9 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
index 4c0ffca97befd61cf3cdd947138d14070c4e7895..44958d79d598dfc3a7e6938a2babbf3e1fdc2188 100644 (file)
@@ -1271,11 +1271,13 @@ fail_leds:
        pad_input_dev = NULL;
        wacom_wac->pad_registered = false;
 fail_register_pad_input:
-       input_unregister_device(touch_input_dev);
+       if (touch_input_dev)
+               input_unregister_device(touch_input_dev);
        wacom_wac->touch_input = NULL;
        wacom_wac->touch_registered = false;
 fail_register_touch_input:
-       input_unregister_device(pen_input_dev);
+       if (pen_input_dev)
+               input_unregister_device(pen_input_dev);
        wacom_wac->pen_input = NULL;
        wacom_wac->pen_registered = false;
 fail_register_pen_input:
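
The added NULL checks matter because these labels form a shared unwind path: depending on where probe failed, one or both input devices may never have been allocated. A standalone sketch of the guarded-unwind idiom (types and names are illustrative):

#include <stdio.h>
#include <stdlib.h>

/* A shared error path must only undo the registrations that actually
 * happened; unregistering a NULL handle (the pre-fix behaviour)
 * dereferences it. */
struct idev { const char *name; };

static void unregister_idev(struct idev *d)
{
        printf("unregister %s\n", d->name);   /* would crash on NULL */
        free(d);
}

int main(void)
{
        struct idev *pen = NULL;                     /* never allocated */
        struct idev *touch = malloc(sizeof(*touch));

        if (touch)
                touch->name = "touch";

        /* Error-path unwind: guard each handle before unregistering. */
        if (touch)
                unregister_idev(touch);
        if (pen)
                unregister_idev(pen);
        return 0;
}
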
index 232da89f4e886fe02b82d452c1a0868f0b65b967..0d244239e55def103f786254f3f617a84b2f0ba2 100644 (file)
@@ -2213,6 +2213,9 @@ void wacom_setup_device_quirks(struct wacom *wacom)
                        features->x_max = 4096;
                        features->y_max = 4096;
                }
+               else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) {
+                       features->device_type |= WACOM_DEVICETYPE_PAD;
+               }
        }
 
        /*
index 28fcb2e246d55a7acc52703e434b98de3e22c45b..fbfc02bb2cfa13c5bc22ece3a47a3cac1af07005 100644 (file)
@@ -195,7 +195,7 @@ abort:
 }
 
 static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index,
-                                unsigned int voltage)
+                                unsigned long voltage)
 {
        int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr];
        int err;
index b77b82f244800628843c103ae7dfcf157bd774ce..6153df735e82ca4fd3d605e159510675410546fc 100644 (file)
@@ -412,8 +412,9 @@ static ssize_t show_pwm(struct device *dev,
        return sprintf(buf, "%d\n", val);
 }
 
-static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
-                         const char *buf, size_t count)
+static ssize_t store_enable(struct device *dev,
+                           struct device_attribute *devattr,
+                           const char *buf, size_t count)
 {
        int index = to_sensor_dev_attr(devattr)->index;
        struct nct7904_data *data = dev_get_drvdata(dev);
@@ -422,18 +423,18 @@ static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
 
        if (kstrtoul(buf, 10, &val) < 0)
                return -EINVAL;
-       if (val > 1 || (val && !data->fan_mode[index]))
+       if (val < 1 || val > 2 || (val == 2 && !data->fan_mode[index]))
                return -EINVAL;
 
        ret = nct7904_write_reg(data, BANK_3, FANCTL1_FMR_REG + index,
-                               val ? data->fan_mode[index] : 0);
+                               val == 2 ? data->fan_mode[index] : 0);
 
        return ret ? ret : count;
 }
 
-/* Return 0 for manual mode or 1 for SmartFan mode */
-static ssize_t show_mode(struct device *dev,
-                        struct device_attribute *devattr, char *buf)
+/* Return 1 for manual mode or 2 for SmartFan mode */
+static ssize_t show_enable(struct device *dev,
+                          struct device_attribute *devattr, char *buf)
 {
        int index = to_sensor_dev_attr(devattr)->index;
        struct nct7904_data *data = dev_get_drvdata(dev);
@@ -443,36 +444,36 @@ static ssize_t show_mode(struct device *dev,
        if (val < 0)
                return val;
 
-       return sprintf(buf, "%d\n", val ? 1 : 0);
+       return sprintf(buf, "%d\n", val ? 2 : 1);
 }
 
 /* 2 attributes per channel: pwm and mode */
-static SENSOR_DEVICE_ATTR(fan1_pwm, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR,
                        show_pwm, store_pwm, 0);
-static SENSOR_DEVICE_ATTR(fan1_mode, S_IRUGO | S_IWUSR,
-                       show_mode, store_mode, 0);
-static SENSOR_DEVICE_ATTR(fan2_pwm, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
+                       show_enable, store_enable, 0);
+static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR,
                        show_pwm, store_pwm, 1);
-static SENSOR_DEVICE_ATTR(fan2_mode, S_IRUGO | S_IWUSR,
-                       show_mode, store_mode, 1);
-static SENSOR_DEVICE_ATTR(fan3_pwm, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR,
+                       show_enable, store_enable, 1);
+static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR,
                        show_pwm, store_pwm, 2);
-static SENSOR_DEVICE_ATTR(fan3_mode, S_IRUGO | S_IWUSR,
-                       show_mode, store_mode, 2);
-static SENSOR_DEVICE_ATTR(fan4_pwm, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR,
+                       show_enable, store_enable, 2);
+static SENSOR_DEVICE_ATTR(pwm4, S_IRUGO | S_IWUSR,
                        show_pwm, store_pwm, 3);
-static SENSOR_DEVICE_ATTR(fan4_mode, S_IRUGO | S_IWUSR,
-                       show_mode, store_mode, 3);
+static SENSOR_DEVICE_ATTR(pwm4_enable, S_IRUGO | S_IWUSR,
+                       show_enable, store_enable, 3);
 
 static struct attribute *nct7904_fanctl_attrs[] = {
-       &sensor_dev_attr_fan1_pwm.dev_attr.attr,
-       &sensor_dev_attr_fan1_mode.dev_attr.attr,
-       &sensor_dev_attr_fan2_pwm.dev_attr.attr,
-       &sensor_dev_attr_fan2_mode.dev_attr.attr,
-       &sensor_dev_attr_fan3_pwm.dev_attr.attr,
-       &sensor_dev_attr_fan3_mode.dev_attr.attr,
-       &sensor_dev_attr_fan4_pwm.dev_attr.attr,
-       &sensor_dev_attr_fan4_mode.dev_attr.attr,
+       &sensor_dev_attr_pwm1.dev_attr.attr,
+       &sensor_dev_attr_pwm1_enable.dev_attr.attr,
+       &sensor_dev_attr_pwm2.dev_attr.attr,
+       &sensor_dev_attr_pwm2_enable.dev_attr.attr,
+       &sensor_dev_attr_pwm3.dev_attr.attr,
+       &sensor_dev_attr_pwm3_enable.dev_attr.attr,
+       &sensor_dev_attr_pwm4.dev_attr.attr,
+       &sensor_dev_attr_pwm4_enable.dev_attr.attr,
        NULL
 };
 
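The hunks above align the driver with the standard hwmon sysfs ABI: pwmN plus pwmN_enable, where 1 selects manual duty-cycle control and 2 selects the chip's automatic (SmartFan) mode. A small model of the new input validation, assuming fan_mode_saved stands in for data->fan_mode[index]:

#include <stdio.h>

/* Model of store_enable()'s new check: only 1 (manual) and 2
 * (SmartFan) are accepted, and 2 only when a SmartFan configuration
 * was captured at probe time. */
static int validate_enable(unsigned long val, int fan_mode_saved)
{
        if (val < 1 || val > 2 || (val == 2 && !fan_mode_saved))
                return -1;                      /* -EINVAL in the driver */
        return 0;
}

int main(void)
{
        printf("%d\n", validate_enable(0, 1));  /* -1: 0 no longer valid */
        printf("%d\n", validate_enable(1, 0));  /*  0: manual always ok */
        printf("%d\n", validate_enable(2, 0));  /* -1: no saved SmartFan mode */
        printf("%d\n", validate_enable(2, 1));  /*  0: SmartFan */
        return 0;
}
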
index 4e70f51c237089f54ebee9882df499551d56b1e3..cc5a35750b507359f6496c980d30bdb7027ded76 100644 (file)
@@ -1464,7 +1464,7 @@ static void bmc150_accel_unregister_triggers(struct bmc150_accel_data *data,
 {
        int i;
 
-       for (i = from; i >= 0; i++) {
+       for (i = from; i >= 0; i--) {
                if (data->triggers[i].indio_trig) {
                        iio_trigger_unregister(data->triggers[i].indio_trig);
                        data->triggers[i].indio_trig = NULL;
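
The one-character change above is the whole fix: an unwind loop that starts at 'from' and increments walks off the end of the array instead of tearing triggers down from..0 in reverse. A standalone sketch (array size illustrative):

#include <stdio.h>

#define NTRIG 3

int main(void)
{
        int registered[NTRIG] = { 1, 1, 1 };
        int from = NTRIG - 1;

        /* Tear down in reverse: i-- terminates at -1. The i++ it
         * replaces would run past the end of the array, which is the
         * out-of-bounds access fixed above. */
        for (int i = from; i >= 0; i--) {
                if (registered[i]) {
                        printf("unregister trigger %d\n", i);
                        registered[i] = 0;
                }
        }
        return 0;
}
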
index e8e2077c7244b5623efc8cd07521a9105dfbe192..13ea1ea23328501f4969c5453e603cf67e5b81b2 100644 (file)
@@ -557,21 +557,21 @@ static void mma8452_transient_interrupt(struct iio_dev *indio_dev)
        if (src & MMA8452_TRANSIENT_SRC_XTRANSE)
                iio_push_event(indio_dev,
                               IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X,
-                                                 IIO_EV_TYPE_THRESH,
+                                                 IIO_EV_TYPE_MAG,
                                                  IIO_EV_DIR_RISING),
                               ts);
 
        if (src & MMA8452_TRANSIENT_SRC_YTRANSE)
                iio_push_event(indio_dev,
                               IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y,
-                                                 IIO_EV_TYPE_THRESH,
+                                                 IIO_EV_TYPE_MAG,
                                                  IIO_EV_DIR_RISING),
                               ts);
 
        if (src & MMA8452_TRANSIENT_SRC_ZTRANSE)
                iio_push_event(indio_dev,
                               IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z,
-                                                 IIO_EV_TYPE_THRESH,
+                                                 IIO_EV_TYPE_MAG,
                                                  IIO_EV_DIR_RISING),
                               ts);
 }
@@ -644,7 +644,7 @@ static int mma8452_reg_access_dbg(struct iio_dev *indio_dev,
 
 static const struct iio_event_spec mma8452_transient_event[] = {
        {
-               .type = IIO_EV_TYPE_THRESH,
+               .type = IIO_EV_TYPE_MAG,
                .dir = IIO_EV_DIR_RISING,
                .mask_separate = BIT(IIO_EV_INFO_ENABLE),
                .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
index 7c5565891cb83012f9034b3e9224629c08a00ced..eb0cd897714a26b619b9e6c924ce4c3a65a5806b 100644 (file)
@@ -153,8 +153,7 @@ config DA9150_GPADC
 
 config CC10001_ADC
        tristate "Cosmic Circuits 10001 ADC driver"
-       depends on HAVE_CLK || REGULATOR
-       depends on HAS_IOMEM
+       depends on HAS_IOMEM && HAVE_CLK && REGULATOR
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        help
index 8a0eb4a04fb55b9cb2436db5b16f678f654b8a26..7b40925dd4ff297e56fa0a3541980e9964d14092 100644 (file)
@@ -182,7 +182,7 @@ struct at91_adc_caps {
        u8      ts_pen_detect_sensitivity;
 
        /* startup time calculate function */
-       u32 (*calc_startup_ticks)(u8 startup_time, u32 adc_clk_khz);
+       u32 (*calc_startup_ticks)(u32 startup_time, u32 adc_clk_khz);
 
        u8      num_channels;
        struct at91_adc_reg_desc registers;
@@ -201,7 +201,7 @@ struct at91_adc_state {
        u8                      num_channels;
        void __iomem            *reg_base;
        struct at91_adc_reg_desc *registers;
-       u8                      startup_time;
+       u32                     startup_time;
        u8                      sample_hold_time;
        bool                    sleep_mode;
        struct iio_trigger      **trig;
@@ -779,7 +779,7 @@ ret:
        return ret;
 }
 
-static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz)
+static u32 calc_startup_ticks_9260(u32 startup_time, u32 adc_clk_khz)
 {
        /*
         * Number of ticks needed to cover the startup time of the ADC
@@ -790,7 +790,7 @@ static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz)
        return round_up((startup_time * adc_clk_khz / 1000) - 1, 8) / 8;
 }
 
-static u32 calc_startup_ticks_9x5(u8 startup_time, u32 adc_clk_khz)
+static u32 calc_startup_ticks_9x5(u32 startup_time, u32 adc_clk_khz)
 {
        /*
         * For sama5d3x and at91sam9x5, the formula changes to:
index 8d9c9b9215ddc1ef5530df512d475a618cb71d8f..d819823f725747e5cd4ac8bcb50d3d833093ed2c 100644 (file)
@@ -299,6 +299,8 @@ static int mcp320x_probe(struct spi_device *spi)
        indio_dev->channels = chip_info->channels;
        indio_dev->num_channels = chip_info->num_channels;
 
+       adc->chip_info = chip_info;
+
        adc->transfer[0].tx_buf = &adc->tx_buf;
        adc->transfer[0].len = sizeof(adc->tx_buf);
        adc->transfer[1].rx_buf = adc->rx_buf;
index 8d4e019ea4ca5d0c3c773b6552b613b484c4b570..9c311c1e1ac7f89cef190a3445507025fdb8f131 100644 (file)
@@ -349,3 +349,7 @@ static struct platform_driver rockchip_saradc_driver = {
 };
 
 module_platform_driver(rockchip_saradc_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("Rockchip SARADC driver");
+MODULE_LICENSE("GPL v2");
index 06f4792240f025ac56b9cc42946cc4834f0ad480..ebe415f1064000c95c88f5d20da48a7f52f71d84 100644 (file)
@@ -833,7 +833,8 @@ static int twl4030_madc_probe(struct platform_device *pdev)
        irq = platform_get_irq(pdev, 0);
        ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
                                   twl4030_madc_threaded_irq_handler,
-                                  IRQF_TRIGGER_RISING, "twl4030_madc", madc);
+                                  IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                                  "twl4030_madc", madc);
        if (ret) {
                dev_err(&pdev->dev, "could not request irq\n");
                goto err_i2c;
index 480f335a0f9faee31bc6b2ac14f7899f18d6c9be..819632bf1fda7fee8fddd2acb3c02d9ff112b1ff 100644 (file)
@@ -635,7 +635,7 @@ static int vf610_adc_reg_access(struct iio_dev *indio_dev,
        struct vf610_adc *info = iio_priv(indio_dev);
 
        if ((readval == NULL) ||
-               (!(reg % 4) || (reg > VF610_REG_ADC_PCTL)))
+               ((reg % 4) || (reg > VF610_REG_ADC_PCTL)))
                return -EINVAL;
 
        *readval = readl(info->regs + reg);
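
The registers on this ADC are 32-bit, so only 4-byte-aligned offsets are valid; the old !(reg % 4) inverted the test and rejected exactly the aligned ones. A sketch of the corrected predicate (0xd0 stands in for VF610_REG_ADC_PCTL and is illustrative):

#include <stdio.h>

/* A register offset is valid only when it is 4-byte aligned and
 * within the register block. */
static int reg_valid(unsigned int reg, unsigned int last_reg)
{
        return !(reg % 4) && reg <= last_reg;
}

int main(void)
{
        printf("%d\n", reg_valid(0x00, 0xd0));   /* 1 */
        printf("%d\n", reg_valid(0x03, 0xd0));   /* 0: unaligned */
        printf("%d\n", reg_valid(0xd4, 0xd0));   /* 0: out of range */
        return 0;
}
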
index 610fc98f88efa4f05fd996505f45411756a50fb1..595511022795f6d917a1c14ae36a7a7267f74602 100644 (file)
@@ -36,6 +36,8 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
        s32 poll_value = 0;
 
        if (state) {
+               if (!atomic_read(&st->user_requested_state))
+                       return 0;
                if (sensor_hub_device_open(st->hsdev))
                        return -EIO;
 
@@ -52,8 +54,12 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
 
                poll_value = hid_sensor_read_poll_value(st);
        } else {
-               if (!atomic_dec_and_test(&st->data_ready))
+               int val;
+
+               val = atomic_dec_if_positive(&st->data_ready);
+               if (val < 0)
                        return 0;
+
                sensor_hub_device_close(st->hsdev);
                state_val = hid_sensor_get_usage_index(st->hsdev,
                        st->power_state.report_id,
@@ -92,9 +98,11 @@ EXPORT_SYMBOL(hid_sensor_power_state);
 
 int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
 {
+
 #ifdef CONFIG_PM
        int ret;
 
+       atomic_set(&st->user_requested_state, state);
        if (state)
                ret = pm_runtime_get_sync(&st->pdev->dev);
        else {
@@ -109,6 +117,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
 
        return 0;
 #else
+       atomic_set(&st->user_requested_state, state);
        return _hid_sensor_power_state(st, state);
 #endif
 }
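
atomic_dec_if_positive() is what makes the power-off path safe against unbalanced calls: unlike the unconditional atomic_dec_and_test() it replaces, it refuses to push the counter below zero. A single-threaded model of its semantics (the kernel version is a lock-free cmpxchg loop; this only illustrates the return convention used above):

#include <stdio.h>

/* Return the old value minus one, but store the decrement only when
 * the result stays non-negative. */
static int dec_if_positive(int *v)
{
        int old = *v;

        if (old > 0)
                *v = old - 1;
        return old - 1;
}

int main(void)
{
        int data_ready = 1;

        /* First power-off: returns 0 -> close the device. */
        printf("%d\n", dec_if_positive(&data_ready));
        /* Second power-off: returns -1 -> skip the close; the counter
         * stays at 0 instead of underflowing to -1. */
        printf("%d\n", dec_if_positive(&data_ready));
        return 0;
}
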
index 61bb9d4239eafdacf6f6d5f8cdb7238537d2c376..e98428df0d44781e557ed2dfcf96017ad7d48baf 100644 (file)
@@ -22,7 +22,7 @@
 #include "ad5624r.h"
 
 static int ad5624r_spi_write(struct spi_device *spi,
-                            u8 cmd, u8 addr, u16 val, u8 len)
+                            u8 cmd, u8 addr, u16 val, u8 shift)
 {
        u32 data;
        u8 msg[3];
@@ -35,7 +35,7 @@ static int ad5624r_spi_write(struct spi_device *spi,
         * 14-, 12-bit input code followed by 0, 2, or 4 don't care bits,
         * for the AD5664R, AD5644R, and AD5624R, respectively.
         */
-       data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << (16 - len));
+       data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << shift);
        msg[0] = data >> 16;
        msg[1] = data >> 8;
        msg[2] = data;
index 17d4bb15be4d2998681f203b5944c5c82ce606ea..65ce86837177158aba7e4d5fd2064e0731129369 100644 (file)
@@ -431,6 +431,23 @@ static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
        return -EINVAL;
 }
 
+static int inv_write_raw_get_fmt(struct iio_dev *indio_dev,
+                                struct iio_chan_spec const *chan, long mask)
+{
+       switch (mask) {
+       case IIO_CHAN_INFO_SCALE:
+               switch (chan->type) {
+               case IIO_ANGL_VEL:
+                       return IIO_VAL_INT_PLUS_NANO;
+               default:
+                       return IIO_VAL_INT_PLUS_MICRO;
+               }
+       default:
+               return IIO_VAL_INT_PLUS_MICRO;
+       }
+
+       return -EINVAL;
+}
 static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
 {
        int result, i;
@@ -696,6 +713,7 @@ static const struct iio_info mpu_info = {
        .driver_module = THIS_MODULE,
        .read_raw = &inv_mpu6050_read_raw,
        .write_raw = &inv_mpu6050_write_raw,
+       .write_raw_get_fmt = &inv_write_raw_get_fmt,
        .attrs = &inv_attribute_group,
        .validate_trigger = inv_mpu6050_validate_trigger,
 };
index e6198b7c9cbfcf55a04b12049ccedbf3800e9b83..a5c59251ec0e93b90eeefee7637556629a32ede6 100644 (file)
@@ -188,6 +188,7 @@ config SENSORS_LM3533
 config LTR501
        tristate "LTR-501ALS-01 light sensor"
        depends on I2C
+       select REGMAP_I2C
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        help
@@ -201,6 +202,7 @@ config LTR501
 config STK3310
        tristate "STK3310 ALS and proximity sensor"
        depends on I2C
+       select REGMAP_I2C
        help
         Say yes here to get support for the Sensortek STK3310 ambient light
         and proximity sensor. The STK3311 model is also supported by this
index 869033e48a1facdd4021e3c9d2889d9fc7ab0773..a1d4905cc9d2d9680d5b3972233948fef3114035 100644 (file)
@@ -123,7 +123,7 @@ static int cm3323_set_it_bits(struct cm3323_data *data, int val, int val2)
        for (i = 0; i < ARRAY_SIZE(cm3323_int_time); i++) {
                if (val == cm3323_int_time[i].val &&
                    val2 == cm3323_int_time[i].val2) {
-                       reg_conf = data->reg_conf;
+                       reg_conf = data->reg_conf & ~CM3323_CONF_IT_MASK;
                        reg_conf |= i << CM3323_CONF_IT_SHIFT;
 
                        ret = i2c_smbus_write_word_data(data->client,
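
The fix above adds the missing clear step of a read-modify-write field update: without masking out the old integration-time bits, a new index with fewer bits set inherits stale ones. A standalone sketch (shift and mask values are illustrative, not the chip's):

#include <stdio.h>

#define IT_SHIFT 2
#define IT_MASK  (0x7 << IT_SHIFT)

/* Clear the field first, then OR in the new index. Skipping the
 * clear (the pre-fix code) leaves stale bits behind. */
static unsigned int set_it(unsigned int conf, unsigned int idx)
{
        conf &= ~IT_MASK;            /* the missing step */
        conf |= idx << IT_SHIFT;
        return conf;
}

int main(void)
{
        unsigned int conf = set_it(0, 5);    /* 5 = 0b101 */

        conf = set_it(conf, 2);              /* 2 = 0b010 */
        printf("%u\n", conf >> IT_SHIFT);    /* 2; without the mask: 7 */
        return 0;
}
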
index 1ef7d3773ab90b4ee54dd69656f95899daed0f56..b5a0e66b5f282f09941dddf58de1b51eed3e71c2 100644 (file)
@@ -1302,7 +1302,7 @@ static int ltr501_init(struct ltr501_data *data)
        if (ret < 0)
                return ret;
 
-       data->als_contr = ret | data->chip_info->als_mode_active;
+       data->als_contr = status | data->chip_info->als_mode_active;
 
        ret = regmap_read(data->regmap, LTR501_PS_CONTR, &status);
        if (ret < 0)
index fee4297d7c8f8e039dd1716c655deff831f01a84..11a027adc204aeb630c66707eaf9d5d0ce169ebb 100644 (file)
@@ -43,7 +43,6 @@
 #define STK3311_CHIP_ID_VAL                    0x1D
 #define STK3310_PSINT_EN                       0x01
 #define STK3310_PS_MAX_VAL                     0xFFFF
-#define STK3310_THRESH_MAX                     0xFFFF
 
 #define STK3310_DRIVER_NAME                    "stk3310"
 #define STK3310_REGMAP_NAME                    "stk3310_regmap"
@@ -84,15 +83,13 @@ static const struct reg_field stk3310_reg_field_flag_psint =
                                REG_FIELD(STK3310_REG_FLAG, 4, 4);
 static const struct reg_field stk3310_reg_field_flag_nf =
                                REG_FIELD(STK3310_REG_FLAG, 0, 0);
-/*
- * Maximum PS values with regard to scale. Used to export the 'inverse'
- * PS value (high values for far objects, low values for near objects).
- */
+
+/* Estimate maximum proximity values with regard to measurement scale. */
 static const int stk3310_ps_max[4] = {
-       STK3310_PS_MAX_VAL / 64,
-       STK3310_PS_MAX_VAL / 16,
-       STK3310_PS_MAX_VAL /  4,
-       STK3310_PS_MAX_VAL,
+       STK3310_PS_MAX_VAL / 640,
+       STK3310_PS_MAX_VAL / 160,
+       STK3310_PS_MAX_VAL /  40,
+       STK3310_PS_MAX_VAL /  10
 };
 
 static const int stk3310_scale_table[][2] = {
@@ -128,14 +125,14 @@ static const struct iio_event_spec stk3310_events[] = {
        /* Proximity event */
        {
                .type = IIO_EV_TYPE_THRESH,
-               .dir = IIO_EV_DIR_FALLING,
+               .dir = IIO_EV_DIR_RISING,
                .mask_separate = BIT(IIO_EV_INFO_VALUE) |
                                 BIT(IIO_EV_INFO_ENABLE),
        },
        /* Out-of-proximity event */
        {
                .type = IIO_EV_TYPE_THRESH,
-               .dir = IIO_EV_DIR_RISING,
+               .dir = IIO_EV_DIR_FALLING,
                .mask_separate = BIT(IIO_EV_INFO_VALUE) |
                                 BIT(IIO_EV_INFO_ENABLE),
        },
@@ -203,25 +200,18 @@ static int stk3310_read_event(struct iio_dev *indio_dev,
                              int *val, int *val2)
 {
        u8 reg;
-       u16 buf;
+       __be16 buf;
        int ret;
-       unsigned int index;
        struct stk3310_data *data = iio_priv(indio_dev);
 
        if (info != IIO_EV_INFO_VALUE)
                return -EINVAL;
 
-       /*
-        * Only proximity interrupts are implemented at the moment.
-        * Since we're inverting proximity values, the sensor's 'high'
-        * threshold will become our 'low' threshold, associated with
-        * 'near' events. Similarly, the sensor's 'low' threshold will
-        * be our 'high' threshold, associated with 'far' events.
-        */
+       /* Only proximity interrupts are implemented at the moment. */
        if (dir == IIO_EV_DIR_RISING)
-               reg = STK3310_REG_THDL_PS;
-       else if (dir == IIO_EV_DIR_FALLING)
                reg = STK3310_REG_THDH_PS;
+       else if (dir == IIO_EV_DIR_FALLING)
+               reg = STK3310_REG_THDL_PS;
        else
                return -EINVAL;
 
@@ -232,8 +222,7 @@ static int stk3310_read_event(struct iio_dev *indio_dev,
                dev_err(&data->client->dev, "register read failed\n");
                return ret;
        }
-       regmap_field_read(data->reg_ps_gain, &index);
-       *val = swab16(stk3310_ps_max[index] - buf);
+       *val = be16_to_cpu(buf);
 
        return IIO_VAL_INT;
 }
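
The __be16/be16_to_cpu() changes in this file make the endianness handling portable: the threshold and data registers hold big-endian 16-bit values, and the open-coded swab16() they replace swaps unconditionally, which is only correct on little-endian hosts. A userspace sketch of the equivalent conversion:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>   /* ntohs(): network order is big-endian */

int main(void)
{
        /* Two raw bytes as they sit in a big-endian 16-bit register. */
        const uint8_t raw[2] = { 0x12, 0x34 };
        uint16_t reg;

        memcpy(&reg, raw, sizeof(reg));

        /* be16_to_cpu() converts only when the host is little-endian;
         * ntohs() is its closest userspace analogue. An unconditional
         * byte swap would corrupt the value on big-endian hosts. */
        printf("0x%04x\n", ntohs(reg));      /* 0x1234 on any host */
        return 0;
}
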
@@ -246,7 +235,7 @@ static int stk3310_write_event(struct iio_dev *indio_dev,
                               int val, int val2)
 {
        u8 reg;
-       u16 buf;
+       __be16 buf;
        int ret;
        unsigned int index;
        struct stk3310_data *data = iio_priv(indio_dev);
@@ -257,13 +246,13 @@ static int stk3310_write_event(struct iio_dev *indio_dev,
                return -EINVAL;
 
        if (dir == IIO_EV_DIR_RISING)
-               reg = STK3310_REG_THDL_PS;
-       else if (dir == IIO_EV_DIR_FALLING)
                reg = STK3310_REG_THDH_PS;
+       else if (dir == IIO_EV_DIR_FALLING)
+               reg = STK3310_REG_THDL_PS;
        else
                return -EINVAL;
 
-       buf = swab16(stk3310_ps_max[index] - val);
+       buf = cpu_to_be16(val);
        ret = regmap_bulk_write(data->regmap, reg, &buf, 2);
        if (ret < 0)
                dev_err(&client->dev, "failed to set PS threshold!\n");
@@ -312,7 +301,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev,
                            int *val, int *val2, long mask)
 {
        u8 reg;
-       u16 buf;
+       __be16 buf;
        int ret;
        unsigned int index;
        struct stk3310_data *data = iio_priv(indio_dev);
@@ -333,15 +322,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev,
                        mutex_unlock(&data->lock);
                        return ret;
                }
-               *val = swab16(buf);
-               if (chan->type == IIO_PROXIMITY) {
-                       /*
-                        * Invert the proximity data so we return low values
-                        * for close objects and high values for far ones.
-                        */
-                       regmap_field_read(data->reg_ps_gain, &index);
-                       *val = stk3310_ps_max[index] - *val;
-               }
+               *val = be16_to_cpu(buf);
                mutex_unlock(&data->lock);
                return IIO_VAL_INT;
        case IIO_CHAN_INFO_INT_TIME:
@@ -581,8 +562,8 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
        }
        event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1,
                                     IIO_EV_TYPE_THRESH,
-                                    (dir ? IIO_EV_DIR_RISING :
-                                           IIO_EV_DIR_FALLING));
+                                    (dir ? IIO_EV_DIR_FALLING :
+                                           IIO_EV_DIR_RISING));
        iio_push_event(indio_dev, event, data->timestamp);
 
        /* Reset the interrupt flag */
@@ -627,13 +608,7 @@ static int stk3310_probe(struct i2c_client *client,
        if (ret < 0)
                return ret;
 
-       ret = iio_device_register(indio_dev);
-       if (ret < 0) {
-               dev_err(&client->dev, "device_register failed\n");
-               stk3310_set_state(data, STK3310_STATE_STANDBY);
-       }
-
-       if (client->irq <= 0)
+       if (client->irq < 0)
                client->irq = stk3310_gpio_probe(client);
 
        if (client->irq >= 0) {
@@ -648,6 +623,12 @@ static int stk3310_probe(struct i2c_client *client,
                                        client->irq);
        }
 
+       ret = iio_device_register(indio_dev);
+       if (ret < 0) {
+               dev_err(&client->dev, "device_register failed\n");
+               stk3310_set_state(data, STK3310_STATE_STANDBY);
+       }
+
        return ret;
 }
 
index 71c2bde275aac65ef8b7eb2f756fb05acb4e1958..f8b1df018abeeba410c19dc48bd9723663973977 100644 (file)
@@ -185,7 +185,7 @@ static int tcs3414_write_raw(struct iio_dev *indio_dev,
                if (val != 0)
                        return -EINVAL;
                for (i = 0; i < ARRAY_SIZE(tcs3414_times); i++) {
-                       if (val == tcs3414_times[i] * 1000) {
+                       if (val2 == tcs3414_times[i] * 1000) {
                                data->timing &= ~TCS3414_INTEG_MASK;
                                data->timing |= i;
                                return i2c_smbus_write_byte_data(
index dcadfc4f06619a1a147d560581bb7bfabdd3f663..efb9350b0d766a0915a2c091c04b9bdea15c5d4e 100644 (file)
@@ -90,6 +90,7 @@ config IIO_ST_MAGN_SPI_3AXIS
 config BMC150_MAGN
        tristate "Bosch BMC150 Magnetometer Driver"
        depends on I2C
+       select REGMAP_I2C
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        help
index d4c1788699911e934fc3c1baffa3de8928ac26a6..1347a1f2e46f8194bad0bfe49bd91ea6a948ac8e 100644 (file)
@@ -706,11 +706,11 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
                goto err_poweroff;
        }
        if (chip_id != BMC150_MAGN_CHIP_ID_VAL) {
-               dev_err(&data->client->dev, "Invalid chip id 0x%x\n", ret);
+               dev_err(&data->client->dev, "Invalid chip id 0x%x\n", chip_id);
                ret = -ENODEV;
                goto err_poweroff;
        }
-       dev_dbg(&data->client->dev, "Chip id %x\n", ret);
+       dev_dbg(&data->client->dev, "Chip id %x\n", chip_id);
 
        preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET];
        ret = bmc150_magn_set_odr(data, preset.odr);
index 7a2ea71c659a0162baf0591af149045c36224d1b..706ebfd6297fa7c0a311a80bfcb431b533d2cc70 100644 (file)
 #define MMC35240_OTP_START_ADDR                0x1B
 
 enum mmc35240_resolution {
-       MMC35240_16_BITS_SLOW = 0, /* 100 Hz */
-       MMC35240_16_BITS_FAST,     /* 200 Hz */
-       MMC35240_14_BITS,          /* 333 Hz */
-       MMC35240_12_BITS,          /* 666 Hz */
+       MMC35240_16_BITS_SLOW = 0, /* 7.92 ms */
+       MMC35240_16_BITS_FAST,     /* 4.08 ms */
+       MMC35240_14_BITS,          /* 2.16 ms */
+       MMC35240_12_BITS,          /* 1.20 ms */
 };
 
 enum mmc35240_axis {
@@ -100,22 +100,22 @@ static const struct {
        int sens[3]; /* sensitivity per X, Y, Z axis */
        int nfo; /* null field output */
 } mmc35240_props_table[] = {
-       /* 16 bits, 100Hz ODR */
+       /* 16 bits, 125Hz ODR */
        {
                {1024, 1024, 1024},
                32768,
        },
-       /* 16 bits, 200Hz ODR */
+       /* 16 bits, 250Hz ODR */
        {
                {1024, 1024, 770},
                32768,
        },
-       /* 14 bits, 333Hz ODR */
+       /* 14 bits, 450Hz ODR */
        {
                {256, 256, 193},
                8192,
        },
-       /* 12 bits, 666Hz ODR */
+       /* 12 bits, 800Hz ODR */
        {
                {64, 64, 48},
                2048,
@@ -133,9 +133,15 @@ struct mmc35240_data {
        int axis_scale[3];
 };
 
-static const int mmc35240_samp_freq[] = {100, 200, 333, 666};
+static const struct {
+       int val;
+       int val2;
+} mmc35240_samp_freq[] = { {1, 500000},
+                          {13, 0},
+                          {25, 0},
+                          {50, 0} };
 
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("100 200 333 666");
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1.5 13 25 50");
 
 #define MMC35240_CHANNEL(_axis) { \
        .type = IIO_MAGN, \
@@ -168,7 +174,8 @@ static int mmc35240_get_samp_freq_index(struct mmc35240_data *data,
        int i;
 
        for (i = 0; i < ARRAY_SIZE(mmc35240_samp_freq); i++)
-               if (mmc35240_samp_freq[i] == val)
+               if (mmc35240_samp_freq[i].val == val &&
+                   mmc35240_samp_freq[i].val2 == val2)
                        return i;
        return -EINVAL;
 }
@@ -195,8 +202,8 @@ static int mmc35240_hw_set(struct mmc35240_data *data, bool set)
                coil_bit = MMC35240_CTRL0_RESET_BIT;
 
        return regmap_update_bits(data->regmap, MMC35240_REG_CTRL0,
-                                 MMC35240_CTRL0_REFILL_BIT,
-                                 coil_bit);
+                                 coil_bit, coil_bit);
+
 }
 
 static int mmc35240_init(struct mmc35240_data *data)
@@ -215,14 +222,15 @@ static int mmc35240_init(struct mmc35240_data *data)
 
        /*
         * make sure we restore sensor characteristics, by doing
-        * a RESET/SET sequence
+        * a SET/RESET sequence, the axis polarity being naturally
+        * aligned after RESET
         */
-       ret = mmc35240_hw_set(data, false);
+       ret = mmc35240_hw_set(data, true);
        if (ret < 0)
                return ret;
        usleep_range(MMC53240_WAIT_SET_RESET, MMC53240_WAIT_SET_RESET + 1);
 
-       ret = mmc35240_hw_set(data, true);
+       ret = mmc35240_hw_set(data, false);
        if (ret < 0)
                return ret;
 
@@ -378,9 +386,9 @@ static int mmc35240_read_raw(struct iio_dev *indio_dev,
                if (i < 0 || i >= ARRAY_SIZE(mmc35240_samp_freq))
                        return -EINVAL;
 
-               *val = mmc35240_samp_freq[i];
-               *val2 = 0;
-               return IIO_VAL_INT;
+               *val = mmc35240_samp_freq[i].val;
+               *val2 = mmc35240_samp_freq[i].val2;
+               return IIO_VAL_INT_PLUS_MICRO;
        default:
                return -EINVAL;
        }
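
IIO_VAL_INT_PLUS_MICRO encodes a fractional reading as val + val2/10^6, which is how the new {1, 500000} entry surfaces as 1.5 Hz. A small model of the formatting (simplified relative to the core's iio_format_value()):

#include <stdio.h>

int main(void)
{
        /* The val/val2 pairs from the table above. */
        const struct { int val; int val2; } freqs[] = {
                { 1, 500000 }, { 13, 0 }, { 25, 0 }, { 50, 0 }
        };

        for (unsigned int i = 0; i < sizeof(freqs) / sizeof(freqs[0]); i++)
                printf("%d.%06d Hz\n", freqs[i].val, freqs[i].val2);
        return 0;   /* prints 1.500000, 13.000000, 25.000000, 50.000000 */
}
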
@@ -496,6 +504,7 @@ static int mmc35240_probe(struct i2c_client *client,
        }
 
        data = iio_priv(indio_dev);
+       i2c_set_clientdata(client, indio_dev);
        data->client = client;
        data->regmap = regmap;
        data->res = MMC35240_16_BITS_SLOW;
index 2042e375f8351de6dc04b0b6ae7861702d17c6cf..3d756bd8c703230bfc93f530d03399f9f5e1413f 100644 (file)
@@ -80,6 +80,7 @@
 #define SX9500_COMPSTAT_MASK           GENMASK(3, 0)
 
 #define SX9500_NUM_CHANNELS            4
+#define SX9500_CHAN_MASK               GENMASK(SX9500_NUM_CHANNELS - 1, 0)
 
 struct sx9500_data {
        struct mutex mutex;
@@ -281,7 +282,7 @@ static int sx9500_read_prox_data(struct sx9500_data *data,
        if (ret < 0)
                return ret;
 
-       *val = 32767 - (s16)be16_to_cpu(regval);
+       *val = be16_to_cpu(regval);
 
        return IIO_VAL_INT;
 }
@@ -329,20 +330,20 @@ static int sx9500_read_proximity(struct sx9500_data *data,
        else
                ret = sx9500_wait_for_sample(data);
 
-       if (ret < 0)
-               return ret;
-
        mutex_lock(&data->mutex);
 
-       ret = sx9500_read_prox_data(data, chan, val);
        if (ret < 0)
-               goto out;
+               goto out_dec_data_rdy;
 
-       ret = sx9500_dec_chan_users(data, chan->channel);
+       ret = sx9500_read_prox_data(data, chan, val);
        if (ret < 0)
-               goto out;
+               goto out_dec_data_rdy;
 
        ret = sx9500_dec_data_rdy_users(data);
+       if (ret < 0)
+               goto out_dec_chan;
+
+       ret = sx9500_dec_chan_users(data, chan->channel);
        if (ret < 0)
                goto out;
 
@@ -350,6 +351,8 @@ static int sx9500_read_proximity(struct sx9500_data *data,
 
        goto out;
 
+out_dec_data_rdy:
+       sx9500_dec_data_rdy_users(data);
 out_dec_chan:
        sx9500_dec_chan_users(data, chan->channel);
 out:
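
The reordered labels restore LIFO cleanup: each goto target must undo only what succeeded before the failure, in reverse order of acquisition. A standalone sketch of the idiom (steps and names are illustrative):

#include <stdio.h>

static int do_read(int fail_enable_irq)
{
        int ret = 0;

        printf("enable channel\n");            /* acquired: channel */

        if (fail_enable_irq) {
                ret = -1;
                goto out_disable_chan;         /* undo the channel only */
        }
        printf("enable data-ready irq\n");     /* acquired: irq */

        printf("read sample\n");

        printf("disable data-ready irq\n");    /* release in reverse */
out_disable_chan:
        printf("disable channel\n");           /* runs on every path */
        return ret;
}

int main(void)
{
        do_read(0);   /* full acquire/release sequence */
        do_read(1);   /* enable channel, then disable channel only */
        return 0;
}
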
@@ -679,7 +682,7 @@ out:
 static int sx9500_buffer_preenable(struct iio_dev *indio_dev)
 {
        struct sx9500_data *data = iio_priv(indio_dev);
-       int ret, i;
+       int ret = 0, i;
 
        mutex_lock(&data->mutex);
 
@@ -703,7 +706,7 @@ static int sx9500_buffer_preenable(struct iio_dev *indio_dev)
 static int sx9500_buffer_predisable(struct iio_dev *indio_dev)
 {
        struct sx9500_data *data = iio_priv(indio_dev);
-       int ret, i;
+       int ret = 0, i;
 
        iio_triggered_buffer_predisable(indio_dev);
 
@@ -800,8 +803,7 @@ static int sx9500_init_compensation(struct iio_dev *indio_dev)
        unsigned int val;
 
        ret = regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0,
-                                GENMASK(SX9500_NUM_CHANNELS, 0),
-                                GENMASK(SX9500_NUM_CHANNELS, 0));
+                                SX9500_CHAN_MASK, SX9500_CHAN_MASK);
        if (ret < 0)
                return ret;
 
@@ -821,7 +823,7 @@ static int sx9500_init_compensation(struct iio_dev *indio_dev)
 
 out:
        regmap_update_bits(data->regmap, SX9500_REG_PROX_CTRL0,
-                          GENMASK(SX9500_NUM_CHANNELS, 0), 0);
+                          SX9500_CHAN_MASK, 0);
        return ret;
 }
 
index cb2e8ad8bfdcd02e75b9ea4f82e31c598e95109e..7a2b639eaa96e2ea440f3102c5da101d41edcd2b 100644 (file)
@@ -204,7 +204,7 @@ static int mlx90614_read_raw(struct iio_dev *indio_dev,
                *val = ret;
                return IIO_VAL_INT;
        case IIO_CHAN_INFO_OFFSET:
-               *val = 13657;
+               *val = -13657;
                *val2 = 500000;
                return IIO_VAL_INT_PLUS_MICRO;
        case IIO_CHAN_INFO_SCALE:
index fcc49f89b9464cfa8d6206602c6b1f2d518c9ecc..8f21f32f9739e23f9558e8b38434daa5a5856f3e 100644 (file)
@@ -132,6 +132,9 @@ static int tmp006_write_raw(struct iio_dev *indio_dev,
        struct tmp006_data *data = iio_priv(indio_dev);
        int i;
 
+       if (mask != IIO_CHAN_INFO_SAMP_FREQ)
+               return -EINVAL;
+
        for (i = 0; i < ARRAY_SIZE(tmp006_freqs); i++)
                if ((val == tmp006_freqs[i][0]) &&
                    (val2 == tmp006_freqs[i][1])) {
index c7dcfe4ca5f10219e553cd4cb5acdd2e1658c95b..0429040304fd478a7ad7833df48c0bdc74c429bc 100644 (file)
@@ -88,7 +88,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
        struct ib_ah *ah;
        struct ib_mad_send_wr_private *mad_send_wr;
 
-       if (device->node_type == RDMA_NODE_IB_SWITCH)
+       if (rdma_cap_ib_switch(device))
                port_priv = ib_get_agent_port(device, 0);
        else
                port_priv = ib_get_agent_port(device, port_num);
@@ -122,7 +122,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
        memcpy(send_buf->mad, mad_hdr, resp_mad_len);
        send_buf->ah = ah;
 
-       if (device->node_type == RDMA_NODE_IB_SWITCH) {
+       if (rdma_cap_ib_switch(device)) {
                mad_send_wr = container_of(send_buf,
                                           struct ib_mad_send_wr_private,
                                           send_buf);
index dbddddd6fb5d111e94e44e2800282c84312131a0..3a972ebf3c0d1170efe280aa7bcf781c831fa98f 100644 (file)
@@ -169,6 +169,7 @@ struct cm_device {
        struct ib_device *ib_device;
        struct device *device;
        u8 ack_delay;
+       int going_down;
        struct cm_port *port[0];
 };
 
@@ -805,6 +806,11 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
 {
        int wait_time;
        unsigned long flags;
+       struct cm_device *cm_dev;
+
+       cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
+       if (!cm_dev)
+               return;
 
        spin_lock_irqsave(&cm.lock, flags);
        cm_cleanup_timewait(cm_id_priv->timewait_info);
@@ -818,8 +824,14 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
         */
        cm_id_priv->id.state = IB_CM_TIMEWAIT;
        wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
-       queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
-                          msecs_to_jiffies(wait_time));
+
+       /* Check if the device started its remove_one */
+       spin_lock_irq(&cm.lock);
+       if (!cm_dev->going_down)
+               queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
+                                  msecs_to_jiffies(wait_time));
+       spin_unlock_irq(&cm.lock);
+
        cm_id_priv->timewait_info = NULL;
 }
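
The going_down flag added through this file is a shutdown handshake: every queueing site tests the flag under the same lock that remove_one() takes to set it, so "set flag, then flush the workqueue" guarantees no new work appears afterwards. A userspace sketch of the pattern (names are illustrative, not the driver's):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool going_down;

/* Queueing sites check the flag under the lock; the caller frees the
 * work item when 0 is returned, mirroring the kfree() paths above. */
static int try_queue_work(const char *what)
{
        int queued = 0;

        pthread_mutex_lock(&lock);
        if (!going_down) {
                printf("queued: %s\n", what);
                queued = 1;
        }
        pthread_mutex_unlock(&lock);
        return queued;
}

int main(void)
{
        try_queue_work("timewait");            /* queued */

        pthread_mutex_lock(&lock);
        going_down = true;                     /* remove_one() starts */
        pthread_mutex_unlock(&lock);
        /* ... flush_workqueue() would run here ... */

        if (!try_queue_work("established"))
                printf("rejected: device going down\n");
        return 0;
}
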
 
@@ -3305,6 +3317,11 @@ static int cm_establish(struct ib_cm_id *cm_id)
        struct cm_work *work;
        unsigned long flags;
        int ret = 0;
+       struct cm_device *cm_dev;
+
+       cm_dev = ib_get_client_data(cm_id->device, &cm_client);
+       if (!cm_dev)
+               return -ENODEV;
 
        work = kmalloc(sizeof *work, GFP_ATOMIC);
        if (!work)
@@ -3343,7 +3360,17 @@ static int cm_establish(struct ib_cm_id *cm_id)
        work->remote_id = cm_id->remote_id;
        work->mad_recv_wc = NULL;
        work->cm_event.event = IB_CM_USER_ESTABLISHED;
-       queue_delayed_work(cm.wq, &work->work, 0);
+
+       /* Check if the device started its remove_one */
+       spin_lock_irq(&cm.lock);
+       if (!cm_dev->going_down) {
+               queue_delayed_work(cm.wq, &work->work, 0);
+       } else {
+               kfree(work);
+               ret = -ENODEV;
+       }
+       spin_unlock_irq(&cm.lock);
+
 out:
        return ret;
 }
@@ -3394,6 +3421,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
        enum ib_cm_event_type event;
        u16 attr_id;
        int paths = 0;
+       int going_down = 0;
 
        switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
        case CM_REQ_ATTR_ID:
@@ -3452,7 +3480,19 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
        work->cm_event.event = event;
        work->mad_recv_wc = mad_recv_wc;
        work->port = port;
-       queue_delayed_work(cm.wq, &work->work, 0);
+
+       /* Check if the device started its remove_one */
+       spin_lock_irq(&cm.lock);
+       if (!port->cm_dev->going_down)
+               queue_delayed_work(cm.wq, &work->work, 0);
+       else
+               going_down = 1;
+       spin_unlock_irq(&cm.lock);
+
+       if (going_down) {
+               kfree(work);
+               ib_free_recv_mad(mad_recv_wc);
+       }
 }
 
 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
@@ -3771,7 +3811,7 @@ static void cm_add_one(struct ib_device *ib_device)
 
        cm_dev->ib_device = ib_device;
        cm_get_ack_delay(cm_dev);
-
+       cm_dev->going_down = 0;
        cm_dev->device = device_create(&cm_class, &ib_device->dev,
                                       MKDEV(0, 0), NULL,
                                       "%s", ib_device->name);
@@ -3864,14 +3904,23 @@ static void cm_remove_one(struct ib_device *ib_device)
        list_del(&cm_dev->list);
        write_unlock_irqrestore(&cm.device_lock, flags);
 
+       spin_lock_irq(&cm.lock);
+       cm_dev->going_down = 1;
+       spin_unlock_irq(&cm.lock);
+
        for (i = 1; i <= ib_device->phys_port_cnt; i++) {
                if (!rdma_cap_ib_cm(ib_device, i))
                        continue;
 
                port = cm_dev->port[i-1];
                ib_modify_port(ib_device, port->port_num, 0, &port_modify);
-               ib_unregister_mad_agent(port->mad_agent);
+               /*
+                * We flush the queue here after going_down is set; this
+                * ensures that no new work can be queued by the recv
+                * handler before we call unregister_mad_agent().
+                */
                flush_workqueue(cm.wq);
+               ib_unregister_mad_agent(port->mad_agent);
                cm_remove_port_fs(port);
        }
        device_unregister(cm_dev->device);
index e6ffa2e66c1ac54b7a2645f59bbb28c00bc91cc3..22a3abee2a54c0fdce95a4567ef3cc45a20901ad 100644 (file)
@@ -67,7 +67,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
                err_str = "Invalid port mapper client";
                goto pid_query_error;
        }
-       if (iwpm_registered_client(nl_client))
+       if (iwpm_check_registration(nl_client, IWPM_REG_VALID) ||
+                       iwpm_user_pid == IWPM_PID_UNAVAILABLE)
                return 0;
        skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REG_PID, &nlh, nl_client);
        if (!skb) {
@@ -106,7 +107,6 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
        ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_IWPM, GFP_KERNEL);
        if (ret) {
                skb = NULL; /* skb is freed in the netlink send-op handling */
-               iwpm_set_registered(nl_client, 1);
                iwpm_user_pid = IWPM_PID_UNAVAILABLE;
                err_str = "Unable to send a nlmsg";
                goto pid_query_error;
@@ -144,12 +144,12 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
                err_str = "Invalid port mapper client";
                goto add_mapping_error;
        }
-       if (!iwpm_registered_client(nl_client)) {
+       if (!iwpm_valid_pid())
+               return 0;
+       if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
                err_str = "Unregistered port mapper client";
                goto add_mapping_error;
        }
-       if (!iwpm_valid_pid())
-               return 0;
        skb = iwpm_create_nlmsg(RDMA_NL_IWPM_ADD_MAPPING, &nlh, nl_client);
        if (!skb) {
                err_str = "Unable to create a nlmsg";
@@ -214,12 +214,12 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
                err_str = "Invalid port mapper client";
                goto query_mapping_error;
        }
-       if (!iwpm_registered_client(nl_client)) {
+       if (!iwpm_valid_pid())
+               return 0;
+       if (!iwpm_check_registration(nl_client, IWPM_REG_VALID)) {
                err_str = "Unregistered port mapper client";
                goto query_mapping_error;
        }
-       if (!iwpm_valid_pid())
-               return 0;
        ret = -ENOMEM;
        skb = iwpm_create_nlmsg(RDMA_NL_IWPM_QUERY_MAPPING, &nlh, nl_client);
        if (!skb) {
@@ -288,12 +288,12 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
                err_str = "Invalid port mapper client";
                goto remove_mapping_error;
        }
-       if (!iwpm_registered_client(nl_client)) {
+       if (!iwpm_valid_pid())
+               return 0;
+       if (iwpm_check_registration(nl_client, IWPM_REG_UNDEF)) {
                err_str = "Unregistered port mapper client";
                goto remove_mapping_error;
        }
-       if (!iwpm_valid_pid())
-               return 0;
        skb = iwpm_create_nlmsg(RDMA_NL_IWPM_REMOVE_MAPPING, &nlh, nl_client);
        if (!skb) {
                ret = -ENOMEM;
@@ -388,7 +388,7 @@ int iwpm_register_pid_cb(struct sk_buff *skb, struct netlink_callback *cb)
        pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
                        __func__, iwpm_user_pid);
        if (iwpm_valid_client(nl_client))
-               iwpm_set_registered(nl_client, 1);
+               iwpm_set_registration(nl_client, IWPM_REG_VALID);
 register_pid_response_exit:
        nlmsg_request->request_done = 1;
        /* always for found nlmsg_request */
@@ -644,7 +644,6 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct nlattr *nltb[IWPM_NLA_MAPINFO_REQ_MAX];
        const char *msg_type = "Mapping Info response";
-       int iwpm_pid;
        u8 nl_client;
        char *iwpm_name;
        u16 iwpm_version;
@@ -669,14 +668,14 @@ int iwpm_mapping_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
                                __func__, nl_client);
                return ret;
        }
-       iwpm_set_registered(nl_client, 0);
+       iwpm_set_registration(nl_client, IWPM_REG_INCOMPL);
        atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
+       iwpm_user_pid = cb->nlh->nlmsg_pid;
        if (!iwpm_mapinfo_available())
                return 0;
-       iwpm_pid = cb->nlh->nlmsg_pid;
        pr_debug("%s: iWarp Port Mapper (pid = %d) is available!\n",
-                __func__, iwpm_pid);
-       ret = iwpm_send_mapinfo(nl_client, iwpm_pid);
+                __func__, iwpm_user_pid);
+       ret = iwpm_send_mapinfo(nl_client, iwpm_user_pid);
        return ret;
 }
 EXPORT_SYMBOL(iwpm_mapping_info_cb);
index a626795bf9c71f43f7d526d07ae3a490399fdb79..5fb089e913530c54a9852d4ae6fabcda24a00451 100644 (file)
@@ -78,6 +78,7 @@ init_exit:
        mutex_unlock(&iwpm_admin_lock);
        if (!ret) {
                iwpm_set_valid(nl_client, 1);
+               iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
                pr_debug("%s: Mapinfo and reminfo tables are created\n",
                                __func__);
        }
@@ -106,6 +107,7 @@ int iwpm_exit(u8 nl_client)
        }
        mutex_unlock(&iwpm_admin_lock);
        iwpm_set_valid(nl_client, 0);
+       iwpm_set_registration(nl_client, IWPM_REG_UNDEF);
        return 0;
 }
 EXPORT_SYMBOL(iwpm_exit);
@@ -397,17 +399,23 @@ void iwpm_set_valid(u8 nl_client, int valid)
 }
 
 /* valid client */
-int iwpm_registered_client(u8 nl_client)
+u32 iwpm_get_registration(u8 nl_client)
 {
        return iwpm_admin.reg_list[nl_client];
 }
 
 /* valid client */
-void iwpm_set_registered(u8 nl_client, int reg)
+void iwpm_set_registration(u8 nl_client, u32 reg)
 {
        iwpm_admin.reg_list[nl_client] = reg;
 }
 
+/* valid client */
+u32 iwpm_check_registration(u8 nl_client, u32 reg)
+{
+       return (iwpm_get_registration(nl_client) & reg);
+}
+
 int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr,
                                struct sockaddr_storage *b_sockaddr)
 {
index ee2d9ff095be2d68d14c9c48eb551f9647ca562f..b7b9e194ce81fd8f2c7598bb2a6b58cab4fad6b8 100644 (file)
 #define IWPM_PID_UNDEFINED     -1
 #define IWPM_PID_UNAVAILABLE   -2
 
+#define IWPM_REG_UNDEF          0x01
+#define IWPM_REG_VALID          0x02
+#define IWPM_REG_INCOMPL        0x04
+
 struct iwpm_nlmsg_request {
        struct list_head    inprocess_list;
        __u32               nlmsg_seq;
@@ -88,7 +92,7 @@ struct iwpm_admin_data {
        atomic_t refcount;
        atomic_t nlmsg_seq;
        int      client_list[RDMA_NL_NUM_CLIENTS];
-       int      reg_list[RDMA_NL_NUM_CLIENTS];
+       u32      reg_list[RDMA_NL_NUM_CLIENTS];
 };
 
 /**
@@ -159,19 +163,31 @@ int iwpm_valid_client(u8 nl_client);
 void iwpm_set_valid(u8 nl_client, int valid);
 
 /**
- * iwpm_registered_client - Check if the port mapper client is registered
+ * iwpm_check_registration - Check if the client registration
+ *                           matches the given one
  * @nl_client: The index of the netlink client
+ * @reg: The given registration type to compare with
  *
  * Call iwpm_register_pid() to register a client
+ * Returns true if the client registration matches reg,
+ * otherwise returns false
+ */
+u32 iwpm_check_registration(u8 nl_client, u32 reg);
+
+/**
+ * iwpm_set_registration - Set the client registration
+ * @nl_client: The index of the netlink client
+ * @reg: Registration type to set
  */
-int iwpm_registered_client(u8 nl_client);
+void iwpm_set_registration(u8 nl_client, u32 reg);
 
 /**
- * iwpm_set_registered - Set the port mapper client to registered or not
+ * iwpm_get_registration
  * @nl_client: The index of the netlink client
- * @reg: 1 if registered or 0 if not
+ *
+ * Returns the client registration type
  */
-void iwpm_set_registered(u8 nl_client, int reg);
+u32 iwpm_get_registration(u8 nl_client);
 
 /**
  * iwpm_send_mapinfo - Send local and mapped IPv4/IPv6 address info of
index a4b1466c1bf686431db027309db9722c7b044455..786fc51bf04b22b0d9b0fc371f3fdb25ec4c811b 100644 (file)
@@ -769,7 +769,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
        bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
                                    mad_agent_priv->qp_info->port_priv->port_num);
 
-       if (device->node_type == RDMA_NODE_IB_SWITCH &&
+       if (rdma_cap_ib_switch(device) &&
            smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                port_num = send_wr->wr.ud.port_num;
        else
@@ -787,14 +787,15 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
                if ((opa_get_smp_direction(opa_smp)
                     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
                     OPA_LID_PERMISSIVE &&
-                    opa_smi_handle_dr_smp_send(opa_smp, device->node_type,
+                    opa_smi_handle_dr_smp_send(opa_smp,
+                                               rdma_cap_ib_switch(device),
                                                port_num) == IB_SMI_DISCARD) {
                        ret = -EINVAL;
                        dev_err(&device->dev, "OPA Invalid directed route\n");
                        goto out;
                }
                opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
-               if (opa_drslid != OPA_LID_PERMISSIVE &&
+               if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
                    opa_drslid & 0xffff0000) {
                        ret = -EINVAL;
                        dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
@@ -810,7 +811,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
        } else {
                if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
                     IB_LID_PERMISSIVE &&
-                    smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
+                    smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
                     IB_SMI_DISCARD) {
                        ret = -EINVAL;
                        dev_err(&device->dev, "Invalid directed route\n");
@@ -2030,7 +2031,7 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv
        struct ib_smp *smp = (struct ib_smp *)recv->mad;
 
        if (smi_handle_dr_smp_recv(smp,
-                                  port_priv->device->node_type,
+                                  rdma_cap_ib_switch(port_priv->device),
                                   port_num,
                                   port_priv->device->phys_port_cnt) ==
                                   IB_SMI_DISCARD)
@@ -2042,13 +2043,13 @@ static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv
 
        if (retsmi == IB_SMI_SEND) { /* don't forward */
                if (smi_handle_dr_smp_send(smp,
-                                          port_priv->device->node_type,
+                                          rdma_cap_ib_switch(port_priv->device),
                                           port_num) == IB_SMI_DISCARD)
                        return IB_SMI_DISCARD;
 
                if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
                        return IB_SMI_DISCARD;
-       } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
+       } else if (rdma_cap_ib_switch(port_priv->device)) {
                /* forward case for switches */
                memcpy(response, recv, mad_priv_size(response));
                response->header.recv_wc.wc = &response->header.wc;
@@ -2115,7 +2116,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
        struct opa_smp *smp = (struct opa_smp *)recv->mad;
 
        if (opa_smi_handle_dr_smp_recv(smp,
-                                  port_priv->device->node_type,
+                                  rdma_cap_ib_switch(port_priv->device),
                                   port_num,
                                   port_priv->device->phys_port_cnt) ==
                                   IB_SMI_DISCARD)
@@ -2127,7 +2128,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
 
        if (retsmi == IB_SMI_SEND) { /* don't forward */
                if (opa_smi_handle_dr_smp_send(smp,
-                                          port_priv->device->node_type,
+                                          rdma_cap_ib_switch(port_priv->device),
                                           port_num) == IB_SMI_DISCARD)
                        return IB_SMI_DISCARD;
 
@@ -2135,7 +2136,7 @@ handle_opa_smi(struct ib_mad_port_private *port_priv,
                    IB_SMI_DISCARD)
                        return IB_SMI_DISCARD;
 
-       } else if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH) {
+       } else if (rdma_cap_ib_switch(port_priv->device)) {
                /* forward case for switches */
                memcpy(response, recv, mad_priv_size(response));
                response->header.recv_wc.wc = &response->header.wc;
@@ -2235,7 +2236,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
                goto out;
        }
 
-       if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
+       if (rdma_cap_ib_switch(port_priv->device))
                port_num = wc->port_num;
        else
                port_num = port_priv->port_num;
@@ -3297,17 +3298,11 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
 
 static void ib_mad_init_device(struct ib_device *device)
 {
-       int start, end, i;
+       int start, i;
 
-       if (device->node_type == RDMA_NODE_IB_SWITCH) {
-               start = 0;
-               end   = 0;
-       } else {
-               start = 1;
-               end   = device->phys_port_cnt;
-       }
+       start = rdma_start_port(device);
 
-       for (i = start; i <= end; i++) {
+       for (i = start; i <= rdma_end_port(device); i++) {
                if (!rdma_cap_ib_mad(device, i))
                        continue;
 
@@ -3342,17 +3337,9 @@ error:
 
 static void ib_mad_remove_device(struct ib_device *device)
 {
-       int start, end, i;
-
-       if (device->node_type == RDMA_NODE_IB_SWITCH) {
-               start = 0;
-               end   = 0;
-       } else {
-               start = 1;
-               end   = device->phys_port_cnt;
-       }
+       int i;
 
-       for (i = start; i <= end; i++) {
+       for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
                if (!rdma_cap_ib_mad(device, i))
                        continue;
 
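rdma_start_port()/rdma_end_port() fold away the special case the old code open-coded at every call site: an IB switch exposes its management port as port 0, while a CA uses ports 1..phys_port_cnt. A userspace model of the helpers (structure fields are illustrative):

#include <stdio.h>
#include <stdbool.h>

struct dev_model { bool is_switch; int phys_port_cnt; };

static int start_port(const struct dev_model *d)
{
        return d->is_switch ? 0 : 1;
}

static int end_port(const struct dev_model *d)
{
        return d->is_switch ? 0 : d->phys_port_cnt;
}

int main(void)
{
        struct dev_model ca = { false, 2 }, sw = { true, 36 };

        for (int i = start_port(&ca); i <= end_port(&ca); i++)
                printf("CA port %d\n", i);       /* 1, 2 */
        for (int i = start_port(&sw); i <= end_port(&sw); i++)
                printf("switch port %d\n", i);   /* 0 only */
        return 0;
}
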
index 1244f02a5c6d402aa5389e206b6b8e5482ec2de2..2cb865c7ce7a98773f338b1b8c09ffc66db4cebf 100644 (file)
@@ -812,12 +812,8 @@ static void mcast_add_one(struct ib_device *device)
        if (!dev)
                return;
 
-       if (device->node_type == RDMA_NODE_IB_SWITCH)
-               dev->start_port = dev->end_port = 0;
-       else {
-               dev->start_port = 1;
-               dev->end_port = device->phys_port_cnt;
-       }
+       dev->start_port = rdma_start_port(device);
+       dev->end_port = rdma_end_port(device);
 
        for (i = 0; i <= dev->end_port - dev->start_port; i++) {
                if (!rdma_cap_ib_mcast(device, dev->start_port + i))
index 62d91bfa4cb70bed63cfac71a746fca544513aff..3bfab3505a2917d561d9d45372c091b38649151c 100644 (file)
 
 #include "smi.h"
 
-enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type,
+enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
                                       int port_num, int phys_port_cnt);
 int opa_smi_get_fwd_port(struct opa_smp *smp);
 extern enum smi_forward_action opa_smi_check_forward_dr_smp(struct opa_smp *smp);
 extern enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
-                                             u8 node_type, int port_num);
+                                             bool is_switch, int port_num);
 
 /*
  * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
index 0fae85062a65b8704ddc11a117751df62db9c9d4..ca919f4296664f070f0c63b1765542375818f0aa 100644 (file)
@@ -1156,12 +1156,8 @@ static void ib_sa_add_one(struct ib_device *device)
        int s, e, i;
        int count = 0;
 
-       if (device->node_type == RDMA_NODE_IB_SWITCH)
-               s = e = 0;
-       else {
-               s = 1;
-               e = device->phys_port_cnt;
-       }
+       s = rdma_start_port(device);
+       e = rdma_end_port(device);
 
        sa_dev = kzalloc(sizeof *sa_dev +
                         (e - s + 1) * sizeof (struct ib_sa_port),
index 368a561d1a5d49d931ef45738c35f3be4b068725..f19b23817c2b49b3650f36077f00d67df801b3a7 100644 (file)
@@ -41,7 +41,7 @@
 #include "smi.h"
 #include "opa_smi.h"
 
-static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
+static enum smi_action __smi_handle_dr_smp_send(bool is_switch, int port_num,
                                                u8 *hop_ptr, u8 hop_cnt,
                                                const u8 *initial_path,
                                                const u8 *return_path,
@@ -64,7 +64,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
 
                /* C14-9:2 */
                if (*hop_ptr && *hop_ptr < hop_cnt) {
-                       if (node_type != RDMA_NODE_IB_SWITCH)
+                       if (!is_switch)
                                return IB_SMI_DISCARD;
 
                        /* return_path set when received */
@@ -77,7 +77,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
                if (*hop_ptr == hop_cnt) {
                        /* return_path set when received */
                        (*hop_ptr)++;
-                       return (node_type == RDMA_NODE_IB_SWITCH ||
+                       return (is_switch ||
                                dr_dlid_is_permissive ?
                                IB_SMI_HANDLE : IB_SMI_DISCARD);
                }
@@ -96,7 +96,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
 
                /* C14-13:2 */
                if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) {
-                       if (node_type != RDMA_NODE_IB_SWITCH)
+                       if (!is_switch)
                                return IB_SMI_DISCARD;
 
                        (*hop_ptr)--;
@@ -108,7 +108,7 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
                if (*hop_ptr == 1) {
                        (*hop_ptr)--;
                        /* C14-13:3 -- SMPs destined for SM shouldn't be here */
-                       return (node_type == RDMA_NODE_IB_SWITCH ||
+                       return (is_switch ||
                                dr_slid_is_permissive ?
                                IB_SMI_HANDLE : IB_SMI_DISCARD);
                }
@@ -127,9 +127,9 @@ static enum smi_action __smi_handle_dr_smp_send(u8 node_type, int port_num,
  * Return IB_SMI_DISCARD if the SMP should be discarded
  */
 enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
-                                      u8 node_type, int port_num)
+                                      bool is_switch, int port_num)
 {
-       return __smi_handle_dr_smp_send(node_type, port_num,
+       return __smi_handle_dr_smp_send(is_switch, port_num,
                                        &smp->hop_ptr, smp->hop_cnt,
                                        smp->initial_path,
                                        smp->return_path,
@@ -139,9 +139,9 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
 }
 
 enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
-                                      u8 node_type, int port_num)
+                                      bool is_switch, int port_num)
 {
-       return __smi_handle_dr_smp_send(node_type, port_num,
+       return __smi_handle_dr_smp_send(is_switch, port_num,
                                        &smp->hop_ptr, smp->hop_cnt,
                                        smp->route.dr.initial_path,
                                        smp->route.dr.return_path,
@@ -152,7 +152,7 @@ enum smi_action opa_smi_handle_dr_smp_send(struct opa_smp *smp,
                                        OPA_LID_PERMISSIVE);
 }
 
-static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
+static enum smi_action __smi_handle_dr_smp_recv(bool is_switch, int port_num,
                                                int phys_port_cnt,
                                                u8 *hop_ptr, u8 hop_cnt,
                                                const u8 *initial_path,
@@ -173,7 +173,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
 
                /* C14-9:2 -- intermediate hop */
                if (*hop_ptr && *hop_ptr < hop_cnt) {
-                       if (node_type != RDMA_NODE_IB_SWITCH)
+                       if (!is_switch)
                                return IB_SMI_DISCARD;
 
                        return_path[*hop_ptr] = port_num;
@@ -188,7 +188,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
                                return_path[*hop_ptr] = port_num;
                        /* hop_ptr updated when sending */
 
-                       return (node_type == RDMA_NODE_IB_SWITCH ||
+                       return (is_switch ||
                                dr_dlid_is_permissive ?
                                IB_SMI_HANDLE : IB_SMI_DISCARD);
                }
@@ -208,7 +208,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
 
                /* C14-13:2 */
                if (2 <= *hop_ptr && *hop_ptr <= hop_cnt) {
-                       if (node_type != RDMA_NODE_IB_SWITCH)
+                       if (!is_switch)
                                return IB_SMI_DISCARD;
 
                        /* hop_ptr updated when sending */
@@ -224,8 +224,7 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
                                return IB_SMI_HANDLE;
                        }
                        /* hop_ptr updated when sending */
-                       return (node_type == RDMA_NODE_IB_SWITCH ?
-                               IB_SMI_HANDLE : IB_SMI_DISCARD);
+                       return (is_switch ? IB_SMI_HANDLE : IB_SMI_DISCARD);
                }
 
                /* C14-13:4 -- hop_ptr = 0 -> give to SM */
@@ -238,10 +237,10 @@ static enum smi_action __smi_handle_dr_smp_recv(u8 node_type, int port_num,
  * Adjust information for a received SMP
  * Return IB_SMI_DISCARD if the SMP should be dropped
  */
-enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
+enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
                                       int port_num, int phys_port_cnt)
 {
-       return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt,
+       return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
                                        &smp->hop_ptr, smp->hop_cnt,
                                        smp->initial_path,
                                        smp->return_path,
@@ -254,10 +253,10 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
  * Adjust information for a received SMP
  * Return IB_SMI_DISCARD if the SMP should be dropped
  */
-enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, u8 node_type,
+enum smi_action opa_smi_handle_dr_smp_recv(struct opa_smp *smp, bool is_switch,
                                           int port_num, int phys_port_cnt)
 {
-       return __smi_handle_dr_smp_recv(node_type, port_num, phys_port_cnt,
+       return __smi_handle_dr_smp_recv(is_switch, port_num, phys_port_cnt,
                                        &smp->hop_ptr, smp->hop_cnt,
                                        smp->route.dr.initial_path,
                                        smp->route.dr.return_path,
index aff96bac49b4c9e126a1e4b6fd309bc4b3b256aa..33c91c8a16e9524da0da6c00ed7a3259084f2c74 100644 (file)
@@ -51,12 +51,12 @@ enum smi_forward_action {
        IB_SMI_FORWARD  /* SMP should be forwarded (for switches only) */
 };
 
-enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
+enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, bool is_switch,
                                       int port_num, int phys_port_cnt);
 int smi_get_fwd_port(struct ib_smp *smp);
 extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp);
 extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
-                                             u8 node_type, int port_num);
+                                             bool is_switch, int port_num);
 
 /*
  * Return IB_SMI_HANDLE if the SMP should be handled by the local SMA/SM
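
Callers of the SMI handlers now pass a precomputed boolean rather than the raw node type. A hypothetical call site, with names assumed from the MAD-layer context above:

        ret = smi_handle_dr_smp_send(smp,
                                     rdma_cap_ib_switch(port_priv->device),
                                     port_num);
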
index ed6b6c85c334b124e3fa4a47225c3f8a6b5c62df..0b84a9cdfe5b90636d3633dbb42d84ef84c4e98e 100644 (file)
@@ -870,7 +870,7 @@ int ib_device_register_sysfs(struct ib_device *device,
                goto err_put;
        }
 
-       if (device->node_type == RDMA_NODE_IB_SWITCH) {
+       if (rdma_cap_ib_switch(device)) {
                ret = add_port(device, 0, port_callback);
                if (ret)
                        goto err_put;
index 62c24b1452b89e2546f2e023a560ee3a21f222e4..00948107364466cafe28e95557be1fcf829e988c 100644 (file)
@@ -1193,6 +1193,7 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
        return 0;
 }
 
+static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
 static void ib_ucm_release_dev(struct device *dev)
 {
        struct ib_ucm_device *ucm_dev;
@@ -1202,7 +1203,7 @@ static void ib_ucm_release_dev(struct device *dev)
        if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
                clear_bit(ucm_dev->devnum, dev_map);
        else
-               clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, dev_map);
+               clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, overflow_map);
        kfree(ucm_dev);
 }
 
@@ -1226,7 +1227,6 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
 
 static dev_t overflow_maj;
-static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
 static int find_overflow_devnum(void)
 {
        int ret;
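
This hunk fixes a bitmap mismatch: device numbers at or above IB_UCM_MAX_DEVICES are allocated from overflow_map, so releasing them must clear overflow_map as well; the old code cleared the corresponding bit in dev_map, corrupting it. Moving the DECLARE_BITMAP above ib_ucm_release_dev() makes the symbol visible at the point of use.
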
index ad45469f7582dbe47788c5b1330803148c0b5dab..29b21213ea7586129357bd803c7de89096227bf1 100644 (file)
@@ -1354,10 +1354,10 @@ static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
        /* Acquire mutex's based on pointer comparison to prevent deadlock. */
        if (file1 < file2) {
                mutex_lock(&file1->mut);
-               mutex_lock(&file2->mut);
+               mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
        } else {
                mutex_lock(&file2->mut);
-               mutex_lock(&file1->mut);
+               mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
        }
 }
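
mutex_lock_nested() with SINGLE_DEPTH_NESTING tells lockdep that taking two locks of the same class here is intentional, silencing a false-positive deadlock report. A minimal sketch of the idiom (ordering by pointer value prevents the actual ABBA deadlock):

        /* lock two instances of the same mutex class safely */
        if (a < b) {
                mutex_lock(&a->mut);
                mutex_lock_nested(&b->mut, SINGLE_DEPTH_NESTING);
        } else {
                mutex_lock(&b->mut);
                mutex_lock_nested(&a->mut, SINGLE_DEPTH_NESTING);
        }
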
 
@@ -1616,6 +1616,7 @@ static void __exit ucma_cleanup(void)
        device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
        misc_deregister(&ucma_misc);
        idr_destroy(&ctx_idr);
+       idr_destroy(&multicast_idr);
 }
 
 module_init(ucma_init);
index b1b73232f21702161d7dc068f08e23e6759e04e8..bbbe0184e5922f6dab1fce7c56a92d1422da986e 100644 (file)
@@ -736,6 +736,10 @@ static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
        /*
         * T3 only supports 32 bits of size.
         */
+       if (sizeof(phys_addr_t) > 4) {
+               pr_warn_once(MOD "Cannot support dma_mrs on this platform.\n");
+               return ERR_PTR(-ENOTSUPP);
+       }
        bl.size = 0xffffffff;
        bl.addr = 0;
        kva = 0;
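
The T3 hardware encodes memory-region size in 32 bits, so a full-memory DMA MR cannot be expressed once physical addresses exceed 4 GB; the new check refuses the request up front (warning once) instead of registering a silently truncated region.
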
index 12b5bc23832b13804c1f07b61ee4d83b8650dcd1..376b031c2c7fa0e00bee607ac3462c74e8d3594c 100644 (file)
@@ -226,8 +226,9 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;
 
-       BUG_ON(in_mad_size != sizeof(*in_mad) ||
-              *out_mad_size != sizeof(*out_mad));
+       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+                        *out_mad_size != sizeof(*out_mad)))
+               return IB_MAD_RESULT_FAILURE;
 
        if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
                return IB_MAD_RESULT_FAILURE;
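
The BUG_ON() -> WARN_ON_ONCE() conversion recurs across every process_mad() implementation touched by this series (ehca, ipath, mlx4, mlx5, mthca, ocrdma). The pattern trades a kernel crash on a malformed MAD size for a one-time warning plus a failed MAD:

        if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
                         *out_mad_size != sizeof(*out_mad)))
                return IB_MAD_RESULT_FAILURE;   /* warn once, fail the MAD, keep running */
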
index 2d7e503d13cb5b9c2855936ce162f41d49ca0ced..871dbe56216a27e75ee64a1e4646ca027dd22a27 100644 (file)
@@ -31,6 +31,8 @@
  * SOFTWARE.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/idr.h>
@@ -399,8 +401,8 @@ static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        u32 bar0 = 0, bar1 = 0;
 
 #ifdef CONFIG_X86_64
-       if (WARN(pat_enabled(),
-                "ipath needs PAT disabled, boot with nopat kernel parameter\n")) {
+       if (pat_enabled()) {
+               pr_warn("ipath needs PAT disabled, boot with nopat kernel parameter\n");
                ret = -ENODEV;
                goto bail;
        }
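
Defining pr_fmt before the first include makes every pr_warn()/pr_err() in the file carry the module name automatically. A minimal sketch, assuming this driver builds as ib_ipath.ko:

        #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt     /* must precede the includes */
        #include <linux/printk.h>

        pr_warn("ipath needs PAT disabled\n");  /* logs as "ib_ipath: ipath needs PAT disabled" */
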
index 948188e37f95ab3fc2dfb2dd4ab7a1698c0ae84f..ad3a926ab3c5d41b393ca04bee9a851ec196460c 100644 (file)
@@ -1499,8 +1499,9 @@ int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;
 
-       BUG_ON(in_mad_size != sizeof(*in_mad) ||
-              *out_mad_size != sizeof(*out_mad));
+       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+                        *out_mad_size != sizeof(*out_mad)))
+               return IB_MAD_RESULT_FAILURE;
 
        switch (in_mad->mad_hdr.mgmt_class) {
        case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
index 48253b839a6f741535c11a93cce685e2cce37901..30ba49c4a98c06b21dff0e0599569dfad19f1f17 100644 (file)
@@ -2044,9 +2044,9 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 
        spin_lock_init(&idev->qp_table.lock);
        spin_lock_init(&idev->lk_table.lock);
-       idev->sm_lid = __constant_be16_to_cpu(IB_LID_PERMISSIVE);
+       idev->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
        /* Set the prefix to the default value (see ch. 4.1.1) */
-       idev->gid_prefix = __constant_cpu_to_be64(0xfe80000000000000ULL);
+       idev->gid_prefix = cpu_to_be64(0xfe80000000000000ULL);
 
        ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
        if (ret)
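
The __constant_be16_to_cpu()/__constant_cpu_to_be64() spellings are legacy; the plain be16_to_cpu()/cpu_to_be64() forms are constant-folded at compile time when given constants, so the generated code is identical.
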
index 36eb3d012b6d34ac96823cb193001afb42ae95ce..180a8f7ec82de80fdf69f4226d9bb0d8fff54052 100644 (file)
@@ -871,7 +871,7 @@ repoll:
                if (is_eth) {
                        wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
                        if (be32_to_cpu(cqe->vlan_my_qpn) &
-                                       MLX4_CQE_VLAN_PRESENT_MASK) {
+                                       MLX4_CQE_CVLAN_PRESENT_MASK) {
                                wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
                                        MLX4_CQE_VID_MASK;
                        } else {
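
The mask rename (VLAN -> CVLAN) appears to distinguish customer-VLAN (C-VLAN) from service-VLAN (S-VLAN) tags in the mlx4 CQE, as groundwork for 802.1ad support; the completion-handling logic itself is unchanged.
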
index 85a50df2f20360e1b8da63959b49901843012198..68b3dfa922bf3e01ce3c00a60674ca508fd50912 100644 (file)
@@ -860,21 +860,31 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;
+       enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);
 
-       BUG_ON(in_mad_size != sizeof(*in_mad) ||
-              *out_mad_size != sizeof(*out_mad));
+       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+                        *out_mad_size != sizeof(*out_mad)))
+               return IB_MAD_RESULT_FAILURE;
 
-       switch (rdma_port_get_link_layer(ibdev, port_num)) {
-       case IB_LINK_LAYER_INFINIBAND:
-               if (!mlx4_is_slave(dev->dev))
-                       return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
-                                             in_grh, in_mad, out_mad);
-       case IB_LINK_LAYER_ETHERNET:
-               return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
-                                         in_grh, in_mad, out_mad);
-       default:
-               return -EINVAL;
+       /* iboe_process_mad(), which uses the HCA flow counters to implement IB
+        * PMA queries, should be called only by VFs and only for that purpose.
+        */
+       if (link == IB_LINK_LAYER_INFINIBAND) {
+               if (mlx4_is_slave(dev->dev) &&
+                   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+                   in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS)
+                       return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+                                               in_grh, in_mad, out_mad);
+
+               return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
+                                     in_grh, in_mad, out_mad);
        }
+
+       if (link == IB_LINK_LAYER_ETHERNET)
+               return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+                                       in_grh, in_mad, out_mad);
+
+       return -EINVAL;
 }
 
 static void send_handler(struct ib_mad_agent *agent,
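
The rewritten dispatch also removes an implicit fallthrough in the old switch statement. On IB links, a VF (slave) issuing a PMA PortCounters query is now routed to iboe_process_mad(), which answers from the HCA flow counters; every other IB MAD goes through the normal ib_process_mad() path, and Ethernet links keep using iboe_process_mad() exclusively.
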
index 067a691ecbed449e098d333e8d82cf62c47ebd7a..8be6db81646049a741abebe1c4ab982fad7e6011 100644 (file)
@@ -253,14 +253,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
        props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
        props->timestamp_mask = 0xFFFFFFFFFFFFULL;
 
-       err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
-       if (err)
-               goto out;
+       if (!mlx4_is_slave(dev->dev))
+               err = mlx4_get_internal_clock_params(dev->dev, &clock_params);
 
        if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
-               resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
                resp.response_length += sizeof(resp.hca_core_clock_offset);
-               resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
+               if (!err && !mlx4_is_slave(dev->dev)) {
+                       resp.comp_mask |= QUERY_DEVICE_RESP_MASK_TIMESTAMP;
+                       resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
+               }
        }
 
        if (uhw->outlen) {
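
Slave (VF) devices cannot map the internal clock page, so mlx4_get_internal_clock_params() is now skipped for them, and the hca_core_clock_offset / timestamp comp_mask bits are reported to userspace only when the query succeeded on a PF.
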
@@ -2669,31 +2670,33 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
        dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
        if (!dm) {
                pr_err("failed to allocate memory for tunneling qp update\n");
-               goto out;
+               return;
        }
 
        for (i = 0; i < ports; i++) {
                dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
                if (!dm[i]) {
                        pr_err("failed to allocate memory for tunneling qp update work struct\n");
-                       for (i = 0; i < dev->caps.num_ports; i++) {
-                               if (dm[i])
-                                       kfree(dm[i]);
-                       }
+                       while (--i >= 0)
+                               kfree(dm[i]);
                        goto out;
                }
-       }
-       /* initialize or tear down tunnel QPs for the slave */
-       for (i = 0; i < ports; i++) {
                INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
                dm[i]->port = first_port + i + 1;
                dm[i]->slave = slave;
                dm[i]->do_init = do_init;
                dm[i]->dev = ibdev;
-               spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
-               if (!ibdev->sriov.is_going_down)
+       }
+       /* initialize or tear down tunnel QPs for the slave */
+       spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
+       if (!ibdev->sriov.is_going_down) {
+               for (i = 0; i < ports; i++)
                        queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
                spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+       } else {
+               spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+               for (i = 0; i < ports; i++)
+                       kfree(dm[i]);
        }
 out:
        kfree(dm);
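
Besides consolidating the two loops, this hunk fixes the error unwind (the old cleanup reused the loop index and iterated dev->caps.num_ports rather than the entries actually allocated) and frees the work items when the device is going down instead of leaking them. The unwind idiom used:

        for (i = 0; i < ports; i++) {
                dm[i] = kmalloc(sizeof(*dm[i]), GFP_ATOMIC);
                if (!dm[i]) {
                        while (--i >= 0)        /* free only what was allocated */
                                kfree(dm[i]);
                        goto out;
                }
        }
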
index 01fc97db45d6e8e3b92f38b1f667627ea4f2a60c..b84d13a487cc04dcfbb501cfbd460c335bf91f8d 100644 (file)
@@ -68,8 +68,9 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;
 
-       BUG_ON(in_mad_size != sizeof(*in_mad) ||
-              *out_mad_size != sizeof(*out_mad));
+       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+                        *out_mad_size != sizeof(*out_mad)))
+               return IB_MAD_RESULT_FAILURE;
 
        slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
 
index 6b2418b74c99ab84345403afe9d310f04aa465e5..7c3f2fb44ba51d8f288221df7b4d316a56adab28 100644 (file)
@@ -209,8 +209,9 @@ int mthca_process_mad(struct ib_device *ibdev,
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;
 
-       BUG_ON(in_mad_size != sizeof(*in_mad) ||
-              *out_mad_size != sizeof(*out_mad));
+       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+                        *out_mad_size != sizeof(*out_mad)))
+               return IB_MAD_RESULT_FAILURE;
 
        /* Forward locally generated traps to the SM */
        if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
index 9047af4299065f543252a29297fbd40e603531b3..8a3ad170d790cc336c08a527db314d859beefd6c 100644 (file)
@@ -1520,8 +1520,9 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
        int rc = arpindex;
        struct net_device *netdev;
        struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
+       __be32 dst_ipaddr = htonl(dst_ip);
 
-       rt = ip_route_output(&init_net, htonl(dst_ip), 0, 0, 0);
+       rt = ip_route_output(&init_net, dst_ipaddr, nesvnic->local_ipaddr, 0, 0);
        if (IS_ERR(rt)) {
                printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n",
                       __func__, dst_ip);
@@ -1533,7 +1534,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
        else
                netdev = nesvnic->netdev;
 
-       neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
+       neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
 
        rcu_read_lock();
        if (neigh) {
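
Two fixes in the connection path: the route lookup now passes the local address as the source hint, and the neighbour is resolved through the route's dst entry rather than a direct arp_tbl lookup of rt_gateway, which handles gatewayed and directly connected routes alike. A sketch of the lookup pairing, error handling elided and local_ipaddr assumed from the surrounding context:

        __be32 daddr = htonl(dst_ip);
        struct rtable *rt = ip_route_output(&init_net, daddr, local_ipaddr, 0, 0);
        if (!IS_ERR(rt))
                neigh = dst_neigh_lookup(&rt->dst, &daddr);     /* takes a reference */
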
index 02120d340d50cffa62fa87bb31e4e22502d5e547..4713dd7ed76432b6d11b042e6dfd24c268f82325 100644 (file)
@@ -3861,7 +3861,7 @@ void nes_manage_arp_cache(struct net_device *netdev, unsigned char *mac_addr,
                                (((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) |
                                (((u32)mac_addr[4]) << 8)  | (u32)mac_addr[5]);
                cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32(
-                               (((u32)mac_addr[0]) << 16) | (u32)mac_addr[1]);
+                               (((u32)mac_addr[0]) << 8) | (u32)mac_addr[1]);
        } else {
                cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = 0;
                cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = 0;
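
The MAC high-word fix is plain arithmetic: the upper 16-bit field must hold the first two octets as (mac[0] << 8) | mac[1]. For a MAC beginning 02:11, the correct value is 0x0211; the old << 16 shift produced 0x020011, pushing the first octet out of the 16-bit field.
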
index b396344fae16af33153f0625104d95c17f488a8d..6a36338593cd0a1c09b1ad67c1d7d57f856b0e93 100644 (file)
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) adapters.                   *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #ifndef __OCRDMA_H__
 #define __OCRDMA_H__
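
The same header replacement, relicensing the GPL-only boilerplate to the dual GPL v2 / BSD text, is applied verbatim to every ocrdma source file in the hunks that follow.
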
index 1554cca5712aafd5659d1cfb80bafff7095f67ca..430b1350fe96ecd1eb16a67f18c2bc3a8e6afa80 100644 (file)
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) adapters.                   *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #ifndef __OCRDMA_ABI_H__
 #define __OCRDMA_ABI_H__
index 4bafa15708d0fc4212587cb4dfd6fce7d3f3211b..44766fee1f4e2cacb2d971f7858ad6e3eb4c4875 100644 (file)
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) adapters.                   *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #include <net/neighbour.h>
 #include <net/netevent.h>
@@ -215,8 +230,9 @@ int ocrdma_process_mad(struct ib_device *ibdev,
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;
 
-       BUG_ON(in_mad_size != sizeof(*in_mad) ||
-              *out_mad_size != sizeof(*out_mad));
+       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+                        *out_mad_size != sizeof(*out_mad)))
+               return IB_MAD_RESULT_FAILURE;
 
        switch (in_mad->mad_hdr.mgmt_class) {
        case IB_MGMT_CLASS_PERF_MGMT:
index cf366fe03cb822580fe96b69e5873ad255a34632..04a30ae674739b87dc266194f68aa27271df653e 100644 (file)
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) adapters.                   *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #ifndef __OCRDMA_AH_H__
 #define __OCRDMA_AH_H__
index 47615ff33bc6a1fb8c0c703b9f975acc6afe79c2..aab391a15db429104f52765346455ba07efa424b 100644 (file)
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) CNA Adapters.              *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #include <linux/sched.h>
 #include <linux/interrupt.h>
index e905972fceb7d48ff882800390c1330367815caf..7ed885c1851e28740b81a0588493c0d0ca92bc42 100644 (file)
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) CNA Adapters.              *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #ifndef __OCRDMA_HW_H__
 #define __OCRDMA_HW_H__
index 8a1398b253a2bec42f2d0032e0aa915ce747bfd9..b119a3413a155574ae1eb1bdd020bb7d77e42822 100644 (file)
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) adapters.                   *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #include <linux/module.h>
 #include <linux/idr.h>
@@ -46,7 +61,7 @@
 MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION);
 MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
 MODULE_AUTHOR("Emulex Corporation");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
 
 static LIST_HEAD(ocrdma_dev_list);
 static DEFINE_SPINLOCK(ocrdma_devlist_lock);
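
The header relicensing is completed here by switching MODULE_LICENSE from "GPL" to "Dual BSD/GPL", keeping the module tag consistent with the new file headers.
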
@@ -696,6 +711,7 @@ static void __exit ocrdma_exit_module(void)
        ocrdma_unregister_inet6addr_notifier();
        ocrdma_unregister_inetaddr_notifier();
        ocrdma_rem_debugfs();
+       idr_destroy(&ocrdma_dev_id);
 }
 
 module_init(ocrdma_init_module);
index 02ad0aee99afc0c5e9449c4f58353e57d38903f1..80006b24aa118e752f444383fc9f4f3c3bafb191 100644 (file)
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) adapters.                   *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #ifndef __OCRDMA_SLI_H__
 #define __OCRDMA_SLI_H__
index 48d7ef51aa0c209678e0ed4bbe97bc5ff9a881d5..69334e214571b94ae0305bc8c9821aa0e261b7fe 100644 (file)
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) adapters.                   *
- * Copyright (C) 2008-2014 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #include <rdma/ib_addr.h>
 #include <rdma/ib_pma.h>
index 091edd68a8a34678e5283b2374c58274c84580b3..c9e58d04c7b8d15c15c5d8ae4205c87cc3be521c 100644 (file)
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) adapters.                   *
- * Copyright (C) 2008-2014 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #ifndef __OCRDMA_STATS_H__
 #define __OCRDMA_STATS_H__
index 5bb61eb58f2c71859969d73ac6e326d4dafc51fd..bc84cd462ecf3208e8576ad1ba1084578ddb7c29 100644 (file)
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) adapters.                   *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #include <linux/dma-mapping.h>
 #include <rdma/ib_verbs.h>
index b15c608efa7b03c72a2eb44e1f7b8af919db13b0..eaccb2d3cb9ff52f51fe0bbba3334053d519d047 100644 (file)
@@ -1,21 +1,36 @@
-/*******************************************************************
- * This file is part of the Emulex RoCE Device Driver for          *
- * RoCE (RDMA over Converged Ethernet) adapters.                   *
- * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.                                     *
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ *   this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  * Contact Information:
  * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
  * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
- *******************************************************************/
+ */
 
 #ifndef __OCRDMA_VERBS_H__
 #define __OCRDMA_VERBS_H__
index 05e3242d84425acd6229204e642084a4ce0f654d..9625e7c438e57749c12495799fb896ac293880fd 100644 (file)
@@ -2412,8 +2412,9 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;
 
-       BUG_ON(in_mad_size != sizeof(*in_mad) ||
-              *out_mad_size != sizeof(*out_mad));
+       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+                        *out_mad_size != sizeof(*out_mad)))
+               return IB_MAD_RESULT_FAILURE;
 
        switch (in_mad->mad_hdr.mgmt_class) {
        case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
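
The hunk above trades BUG_ON() for WARN_ON_ONCE(): a MAD buffer of the wrong size indicates a caller bug worth logging, not a condition worth halting the machine for. A minimal sketch of the pattern, with illustrative names rather than the driver's actual code:

#include <linux/errno.h>
#include <linux/kernel.h>

/* Sketch: validate request/response sizes, warn once on a mismatch,
 * and fail the request instead of panicking as BUG_ON() would. */
static int process_request(size_t in_size, size_t expect_in,
			   size_t out_size, size_t expect_out)
{
	if (WARN_ON_ONCE(in_size != expect_in || out_size != expect_out))
		return -EINVAL;	/* qib returns IB_MAD_RESULT_FAILURE here */

	/* ... normal processing ... */
	return 0;
}
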
index bd94b0a6e9e535f8d8b4a9e1fa1428e0696e2947..79859c4d43c9c572f4946a364b87fdc74acf40a3 100644 (file)
@@ -239,7 +239,7 @@ struct ipoib_cm_tx {
        struct net_device   *dev;
        struct ipoib_neigh  *neigh;
        struct ipoib_path   *path;
-       struct ipoib_cm_tx_buf *tx_ring;
+       struct ipoib_tx_buf *tx_ring;
        unsigned             tx_head;
        unsigned             tx_tail;
        unsigned long        flags;
@@ -504,6 +504,33 @@ int ipoib_mcast_stop_thread(struct net_device *dev);
 void ipoib_mcast_dev_down(struct net_device *dev);
 void ipoib_mcast_dev_flush(struct net_device *dev);
 
+int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req);
+void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
+                       struct ipoib_tx_buf *tx_req);
+
+static inline void ipoib_build_sge(struct ipoib_dev_priv *priv,
+                                  struct ipoib_tx_buf *tx_req)
+{
+       int i, off;
+       struct sk_buff *skb = tx_req->skb;
+       skb_frag_t *frags = skb_shinfo(skb)->frags;
+       int nr_frags = skb_shinfo(skb)->nr_frags;
+       u64 *mapping = tx_req->mapping;
+
+       if (skb_headlen(skb)) {
+               priv->tx_sge[0].addr         = mapping[0];
+               priv->tx_sge[0].length       = skb_headlen(skb);
+               off = 1;
+       } else
+               off = 0;
+
+       for (i = 0; i < nr_frags; ++i) {
+               priv->tx_sge[i + off].addr = mapping[i + off];
+               priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
+       }
+       priv->tx_wr.num_sge          = nr_frags + off;
+}
+
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev);
 int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter);
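
The new ipoib_build_sge() helper centralizes the gather-list construction that the UD and CM send paths now share: mapping[0] covers the linear skb head when it is non-empty, and the remaining entries cover one page fragment each, so the SGE count is nr_frags plus the optional head entry. A hedged sketch of how a send path composes the exported helpers (error accounting elided; it assumes the ipoib.h declarations above and that priv->tx_wr.sg_list already points at priv->tx_sge, as the driver sets up at init time):

/* Sketch only: map the skb (head + frags), build the gather list into
 * priv->tx_sge/priv->tx_wr, post the send, and unmap on failure. */
static int send_one(struct ipoib_dev_priv *priv, struct ib_qp *qp,
		    struct ipoib_tx_buf *tx_req)
{
	struct ib_send_wr *bad_wr;

	if (ipoib_dma_map_tx(priv->ca, tx_req))
		return -EIO;

	ipoib_build_sge(priv, tx_req);	/* fills tx_sge[] and num_sge */

	if (ib_post_send(qp, &priv->tx_wr, &bad_wr)) {
		ipoib_dma_unmap_tx(priv, tx_req);
		return -EIO;
	}
	return 0;
}
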
index cf32a778e7d0ccc0b6225d9c01442f5d2ec4cdb1..ee39be6ccfb0fdd9aa75ad408543e1f0b08ff1d4 100644 (file)
@@ -694,14 +694,12 @@ repost:
 static inline int post_send(struct ipoib_dev_priv *priv,
                            struct ipoib_cm_tx *tx,
                            unsigned int wr_id,
-                           u64 addr, int len)
+                           struct ipoib_tx_buf *tx_req)
 {
        struct ib_send_wr *bad_wr;
 
-       priv->tx_sge[0].addr          = addr;
-       priv->tx_sge[0].length        = len;
+       ipoib_build_sge(priv, tx_req);
 
-       priv->tx_wr.num_sge     = 1;
        priv->tx_wr.wr_id       = wr_id | IPOIB_OP_CM;
 
        return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
@@ -710,8 +708,7 @@ static inline int post_send(struct ipoib_dev_priv *priv,
 void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
-       struct ipoib_cm_tx_buf *tx_req;
-       u64 addr;
+       struct ipoib_tx_buf *tx_req;
        int rc;
 
        if (unlikely(skb->len > tx->mtu)) {
@@ -735,24 +732,21 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
         */
        tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
        tx_req->skb = skb;
-       addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
-       if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
+
+       if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
                ++dev->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return;
        }
 
-       tx_req->mapping = addr;
-
        skb_orphan(skb);
        skb_dst_drop(skb);
 
-       rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
-                      addr, skb->len);
+       rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
        if (unlikely(rc)) {
                ipoib_warn(priv, "post_send failed, error %d\n", rc);
                ++dev->stats.tx_errors;
-               ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
+               ipoib_dma_unmap_tx(priv, tx_req);
                dev_kfree_skb_any(skb);
        } else {
                dev->trans_start = jiffies;
@@ -777,7 +771,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_cm_tx *tx = wc->qp->qp_context;
        unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
-       struct ipoib_cm_tx_buf *tx_req;
+       struct ipoib_tx_buf *tx_req;
        unsigned long flags;
 
        ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
@@ -791,7 +785,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
        tx_req = &tx->tx_ring[wr_id];
 
-       ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
+       ipoib_dma_unmap_tx(priv, tx_req);
 
        /* FIXME: is this right? Shouldn't we only increment on success? */
        ++dev->stats.tx_packets;
@@ -1036,6 +1030,9 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
 
        struct ib_qp *tx_qp;
 
+       if (dev->features & NETIF_F_SG)
+               attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+
        tx_qp = ib_create_qp(priv->pd, &attr);
        if (PTR_ERR(tx_qp) == -EINVAL) {
                ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
@@ -1170,7 +1167,7 @@ err_tx:
 static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
 {
        struct ipoib_dev_priv *priv = netdev_priv(p->dev);
-       struct ipoib_cm_tx_buf *tx_req;
+       struct ipoib_tx_buf *tx_req;
        unsigned long begin;
 
        ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
@@ -1197,8 +1194,7 @@ timeout:
 
        while ((int) p->tx_tail - (int) p->tx_head < 0) {
                tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
-               ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
-                                   DMA_TO_DEVICE);
+               ipoib_dma_unmap_tx(priv, tx_req);
                dev_kfree_skb_any(tx_req->skb);
                ++p->tx_tail;
                netif_tx_lock_bh(p->dev);
@@ -1455,7 +1451,6 @@ static void ipoib_cm_stale_task(struct work_struct *work)
        spin_unlock_irq(&priv->lock);
 }
 
-
 static ssize_t show_mode(struct device *d, struct device_attribute *attr,
                         char *buf)
 {
index 63b92cbb29ad0ad1f0165a738a47efbe0f650e04..d266667ca9b82273dd4b7abb4856f69b29f65174 100644 (file)
@@ -263,8 +263,7 @@ repost:
                           "for buf %d\n", wr_id);
 }
 
-static int ipoib_dma_map_tx(struct ib_device *ca,
-                           struct ipoib_tx_buf *tx_req)
+int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
 {
        struct sk_buff *skb = tx_req->skb;
        u64 *mapping = tx_req->mapping;
@@ -305,8 +304,8 @@ partial_error:
        return -EIO;
 }
 
-static void ipoib_dma_unmap_tx(struct ib_device *ca,
-                              struct ipoib_tx_buf *tx_req)
+void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
+                       struct ipoib_tx_buf *tx_req)
 {
        struct sk_buff *skb = tx_req->skb;
        u64 *mapping = tx_req->mapping;
@@ -314,7 +313,8 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
        int off;
 
        if (skb_headlen(skb)) {
-               ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+               ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
+                                   DMA_TO_DEVICE);
                off = 1;
        } else
                off = 0;
@@ -322,8 +322,8 @@ static void ipoib_dma_unmap_tx(struct ib_device *ca,
        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-               ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag),
-                                 DMA_TO_DEVICE);
+               ib_dma_unmap_page(priv->ca, mapping[i + off],
+                                 skb_frag_size(frag), DMA_TO_DEVICE);
        }
 }
 
@@ -389,7 +389,7 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
        tx_req = &priv->tx_ring[wr_id];
 
-       ipoib_dma_unmap_tx(priv->ca, tx_req);
+       ipoib_dma_unmap_tx(priv, tx_req);
 
        ++dev->stats.tx_packets;
        dev->stats.tx_bytes += tx_req->skb->len;
@@ -514,24 +514,10 @@ static inline int post_send(struct ipoib_dev_priv *priv,
                            void *head, int hlen)
 {
        struct ib_send_wr *bad_wr;
-       int i, off;
        struct sk_buff *skb = tx_req->skb;
-       skb_frag_t *frags = skb_shinfo(skb)->frags;
-       int nr_frags = skb_shinfo(skb)->nr_frags;
-       u64 *mapping = tx_req->mapping;
 
-       if (skb_headlen(skb)) {
-               priv->tx_sge[0].addr         = mapping[0];
-               priv->tx_sge[0].length       = skb_headlen(skb);
-               off = 1;
-       } else
-               off = 0;
+       ipoib_build_sge(priv, tx_req);
 
-       for (i = 0; i < nr_frags; ++i) {
-               priv->tx_sge[i + off].addr = mapping[i + off];
-               priv->tx_sge[i + off].length = skb_frag_size(&frags[i]);
-       }
-       priv->tx_wr.num_sge          = nr_frags + off;
        priv->tx_wr.wr_id            = wr_id;
        priv->tx_wr.wr.ud.remote_qpn = qpn;
        priv->tx_wr.wr.ud.ah         = address;
@@ -617,7 +603,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                ipoib_warn(priv, "post_send failed, error %d\n", rc);
                ++dev->stats.tx_errors;
                --priv->tx_outstanding;
-               ipoib_dma_unmap_tx(priv->ca, tx_req);
+               ipoib_dma_unmap_tx(priv, tx_req);
                dev_kfree_skb_any(skb);
                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
@@ -868,7 +854,7 @@ int ipoib_ib_dev_stop(struct net_device *dev)
                        while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
                                tx_req = &priv->tx_ring[priv->tx_tail &
                                                        (ipoib_sendq_size - 1)];
-                               ipoib_dma_unmap_tx(priv->ca, tx_req);
+                               ipoib_dma_unmap_tx(priv, tx_req);
                                dev_kfree_skb_any(tx_req->skb);
                                ++priv->tx_tail;
                                --priv->tx_outstanding;
@@ -985,20 +971,21 @@ static inline int update_child_pkey(struct ipoib_dev_priv *priv)
 }
 
 static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
-                               enum ipoib_flush_level level)
+                               enum ipoib_flush_level level,
+                               int nesting)
 {
        struct ipoib_dev_priv *cpriv;
        struct net_device *dev = priv->dev;
        int result;
 
-       down_read(&priv->vlan_rwsem);
+       down_read_nested(&priv->vlan_rwsem, nesting);
 
        /*
         * Flush any child interfaces too -- they might be up even if
         * the parent is down.
         */
        list_for_each_entry(cpriv, &priv->child_intfs, list)
-               __ipoib_ib_dev_flush(cpriv, level);
+               __ipoib_ib_dev_flush(cpriv, level, nesting + 1);
 
        up_read(&priv->vlan_rwsem);
 
@@ -1076,7 +1063,7 @@ void ipoib_ib_dev_flush_light(struct work_struct *work)
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_light);
 
-       __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT);
+       __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
 }
 
 void ipoib_ib_dev_flush_normal(struct work_struct *work)
@@ -1084,7 +1071,7 @@ void ipoib_ib_dev_flush_normal(struct work_struct *work)
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_normal);
 
-       __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL);
+       __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
 }
 
 void ipoib_ib_dev_flush_heavy(struct work_struct *work)
@@ -1092,7 +1079,7 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work)
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_heavy);
 
-       __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY);
+       __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
 }
 
 void ipoib_ib_dev_cleanup(struct net_device *dev)
index da149c278cb8149a7541169c7b05147be82f8ed5..b2943c84a5dda0aecdd8904917f2ebbb02b9b013 100644 (file)
@@ -190,7 +190,7 @@ static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_featu
        struct ipoib_dev_priv *priv = netdev_priv(dev);
 
        if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
-               features &= ~(NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
+               features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
 
        return features;
 }
@@ -232,6 +232,7 @@ int ipoib_set_mode(struct net_device *dev, const char *buf)
                ipoib_warn(priv, "enabling connected mode "
                           "will cause multicast packet drops\n");
                netdev_update_features(dev);
+               dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
                rtnl_unlock();
                priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
 
@@ -1577,7 +1578,8 @@ static struct net_device *ipoib_add_port(const char *format,
        SET_NETDEV_DEV(priv->dev, hca->dma_device);
        priv->dev->dev_id = port - 1;
 
-       if (!ib_query_port(hca, port, &attr))
+       result = ib_query_port(hca, port, &attr);
+       if (!result)
                priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
        else {
                printk(KERN_WARNING "%s: ib_query_port %d failed\n",
@@ -1598,7 +1600,8 @@ static struct net_device *ipoib_add_port(const char *format,
                goto device_init_failed;
        }
 
-       if (ipoib_set_dev_features(priv, hca))
+       result = ipoib_set_dev_features(priv, hca);
+       if (result)
                goto device_init_failed;
 
        /*
@@ -1684,7 +1687,7 @@ static void ipoib_add_one(struct ib_device *device)
        struct list_head *dev_list;
        struct net_device *dev;
        struct ipoib_dev_priv *priv;
-       int s, e, p;
+       int p;
        int count = 0;
 
        dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
@@ -1693,15 +1696,7 @@ static void ipoib_add_one(struct ib_device *device)
 
        INIT_LIST_HEAD(dev_list);
 
-       if (device->node_type == RDMA_NODE_IB_SWITCH) {
-               s = 0;
-               e = 0;
-       } else {
-               s = 1;
-               e = device->phys_port_cnt;
-       }
-
-       for (p = s; p <= e; ++p) {
+       for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
                if (!rdma_protocol_ib(device, p))
                        continue;
                dev = ipoib_add_port("ib%d", device, p);
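
The open-coded port-range selection (port 0 only for switches, 1..phys_port_cnt for channel adapters) is replaced by the core helpers rdma_start_port() and rdma_end_port(), which encode that rule once for all ULPs. The resulting iteration pattern, as used here and again in srp_add_one() below:

#include <rdma/ib_verbs.h>

/* Sketch: enumerate a device's ports via the core helpers; ipoib
 * additionally skips ports that do not speak InfiniBand (e.g. RoCE
 * or iWARP ports). */
static void add_ports(struct ib_device *device)
{
	int p;

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		if (!rdma_protocol_ib(device, p))
			continue;
		/* ... bring up the port ... */
	}
}
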
index 9e6ee82a8fd76f490de93d6117754e2a6657ec72..851c8219d50104105ec8d97a3ba743cb6f59626b 100644 (file)
@@ -177,7 +177,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
                else
                        size += ipoib_recvq_size * ipoib_max_conn_qp;
        } else
-               goto out_free_wq;
+               if (ret != -ENOSYS)
+                       goto out_free_wq;
 
        cq_attr.cqe = size;
        priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL,
index 7717009631271ea6965a7d60b9dcf3f74acdd1d6..d851e1828d6f5152e9c8ca49de9b64a3b953f180 100644 (file)
@@ -775,6 +775,17 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        ret = isert_rdma_post_recvl(isert_conn);
        if (ret)
                goto out_conn_dev;
+       /*
+        * Obtain the second reference now before isert_rdma_accept() to
+        * ensure that any initiator generated REJECT CM event that occurs
+        * asynchronously won't drop the last reference until the error path
+        * in iscsi_target_login_sess_out() does its ->iscsit_free_conn() ->
+        * isert_free_conn() -> isert_put_conn() -> kref_put().
+        */
+       if (!kref_get_unless_zero(&isert_conn->kref)) {
+               isert_warn("conn %p connect_release is running\n", isert_conn);
+               goto out_conn_dev;
+       }
 
        ret = isert_rdma_accept(isert_conn);
        if (ret)
@@ -836,11 +847,6 @@ isert_connected_handler(struct rdma_cm_id *cma_id)
 
        isert_info("conn %p\n", isert_conn);
 
-       if (!kref_get_unless_zero(&isert_conn->kref)) {
-               isert_warn("conn %p connect_release is running\n", isert_conn);
-               return;
-       }
-
        mutex_lock(&isert_conn->mutex);
        if (isert_conn->state != ISER_CONN_FULL_FEATURE)
                isert_conn->state = ISER_CONN_UP;
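
Taking the second reference before isert_rdma_accept(), rather than in the connected handler, closes a race: once the accept is posted, an initiator-generated REJECT can fire asynchronously and drop what would otherwise be the last reference while login is still in flight. The kref_get_unless_zero() idiom only succeeds while the object is demonstrably alive; failure means release is already running and the caller must back off. A hedged sketch with hypothetical names:

#include <linux/errno.h>
#include <linux/kref.h>

struct conn {
	struct kref kref;
	/* ... */
};

static void conn_release(struct kref *kref)
{
	/* final teardown */
}

static int accept_connection(struct conn *c)
{
	return 0;	/* stub: may trigger async REJECT events */
}

/* Sketch: pin an extra reference *before* arming anything that can
 * complete asynchronously; back off if teardown already started. */
static int arm_connection(struct conn *c)
{
	if (!kref_get_unless_zero(&c->kref))
		return -EAGAIN;		/* release in progress */

	if (accept_connection(c)) {
		kref_put(&c->kref, conn_release);
		return -EIO;
	}
	return 0;
}
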
index 267dc4f7550236e89fae58ff80c4a3953cff877f..31a20b462266611299aeeae5cd51fd19b69b635e 100644 (file)
@@ -161,13 +161,10 @@ static int srp_tmo_set(const char *val, const struct kernel_param *kp)
 {
        int tmo, res;
 
-       if (strncmp(val, "off", 3) != 0) {
-               res = kstrtoint(val, 0, &tmo);
-               if (res)
-                       goto out;
-       } else {
-               tmo = -1;
-       }
+       res = srp_parse_tmo(&tmo, val);
+       if (res)
+               goto out;
+
        if (kp->arg == &srp_reconnect_delay)
                res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
                                    srp_dev_loss_tmo);
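
srp_parse_tmo() itself is outside this hunk; judging from the open-coded logic it replaces, the helper presumably folds the "off" keyword and integer parsing into one place, roughly as follows (an assumed reconstruction, not the patch's verbatim code):

#include <linux/kernel.h>
#include <linux/string.h>

/* Assumed shape of the extracted helper: "off" maps to -1 (timeout
 * disabled); anything else must parse as a plain integer. */
static int srp_parse_tmo(int *tmo, const char *val)
{
	int res = 0;

	if (strncmp(val, "off", 3) != 0)
		res = kstrtoint(val, 0, tmo);
	else
		*tmo = -1;

	return res;
}
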
@@ -3379,7 +3376,7 @@ static void srp_add_one(struct ib_device *device)
        struct srp_device *srp_dev;
        struct ib_device_attr *dev_attr;
        struct srp_host *host;
-       int mr_page_shift, s, e, p;
+       int mr_page_shift, p;
        u64 max_pages_per_mr;
 
        dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
@@ -3443,15 +3440,7 @@ static void srp_add_one(struct ib_device *device)
        if (IS_ERR(srp_dev->mr))
                goto err_pd;
 
-       if (device->node_type == RDMA_NODE_IB_SWITCH) {
-               s = 0;
-               e = 0;
-       } else {
-               s = 1;
-               e = device->phys_port_cnt;
-       }
-
-       for (p = s; p <= e; ++p) {
+       for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
                host = srp_add_port(srp_dev, p);
                if (host)
                        list_add_tail(&host->list, &srp_dev->dev_list);
index 82897ca17f32349df3e3cc332b9b0204bbc524a3..60ff0a2390e5f02f7cffabb9a5154cc0c4b734e7 100644 (file)
@@ -302,7 +302,7 @@ static void srpt_get_iou(struct ib_dm_mad *mad)
        int i;
 
        ioui = (struct ib_dm_iou_info *)mad->data;
-       ioui->change_id = __constant_cpu_to_be16(1);
+       ioui->change_id = cpu_to_be16(1);
        ioui->max_controllers = 16;
 
        /* set present for slot 1 and empty for the rest */
@@ -330,13 +330,13 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
 
        if (!slot || slot > 16) {
                mad->mad_hdr.status
-                       = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+                       = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
                return;
        }
 
        if (slot > 2) {
                mad->mad_hdr.status
-                       = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+                       = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
                return;
        }
 
@@ -348,10 +348,10 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
        iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
        iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
        iocp->subsys_device_id = 0x0;
-       iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
-       iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS);
-       iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL);
-       iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION);
+       iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
+       iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
+       iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
+       iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
        iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
        iocp->rdma_read_depth = 4;
        iocp->send_size = cpu_to_be32(srp_max_req_size);
@@ -379,13 +379,13 @@ static void srpt_get_svc_entries(u64 ioc_guid,
 
        if (!slot || slot > 16) {
                mad->mad_hdr.status
-                       = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+                       = cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
                return;
        }
 
        if (slot > 2 || lo > hi || hi > 1) {
                mad->mad_hdr.status
-                       = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+                       = cpu_to_be16(DM_MAD_STATUS_NO_IOC);
                return;
        }
 
@@ -436,7 +436,7 @@ static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
                break;
        default:
                rsp_mad->mad_hdr.status =
-                   __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+                   cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
                break;
        }
 }
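
Dropping the __constant_ prefix throughout srpt is a pure cleanup: cpu_to_be16() and friends already test __builtin_constant_p() internally and fold constant arguments at compile time, so the explicit __constant_cpu_to_be16() spelling buys nothing in ordinary code and only obscures it. Both forms below produce identical object code:

#include <asm/byteorder.h>
#include <linux/types.h>

/* cpu_to_be16() folds constant arguments itself, so these two
 * initializers are byte-for-byte equivalent; the second style is
 * what the hunks above and below are removing. */
static const __be16 modern = cpu_to_be16(0x1234);
static const __be16 legacy = __constant_cpu_to_be16(0x1234);
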
@@ -493,11 +493,11 @@ static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
                break;
        case IB_MGMT_METHOD_SET:
                dm_mad->mad_hdr.status =
-                   __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+                   cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
                break;
        default:
                dm_mad->mad_hdr.status =
-                   __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
+                   cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
                break;
        }
 
@@ -1535,7 +1535,7 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
        memset(srp_rsp, 0, sizeof *srp_rsp);
        srp_rsp->opcode = SRP_RSP;
        srp_rsp->req_lim_delta =
-               __constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
+               cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
        srp_rsp->tag = tag;
        srp_rsp->status = status;
 
@@ -1585,8 +1585,8 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
        memset(srp_rsp, 0, sizeof *srp_rsp);
 
        srp_rsp->opcode = SRP_RSP;
-       srp_rsp->req_lim_delta = __constant_cpu_to_be32(1
-                                   + atomic_xchg(&ch->req_lim_delta, 0));
+       srp_rsp->req_lim_delta =
+               cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
        srp_rsp->tag = tag;
 
        srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
@@ -1630,7 +1630,7 @@ static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
        switch (len) {
        case 8:
                if ((*((__be64 *)lun) &
-                    __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
+                    cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
                        goto out_err;
                break;
        case 4:
@@ -2449,8 +2449,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
        }
 
        if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
-               rej->reason = __constant_cpu_to_be32(
-                               SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
+               rej->reason = cpu_to_be32(
+                             SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
                ret = -EINVAL;
                pr_err("rejected SRP_LOGIN_REQ because its"
                       " length (%d bytes) is out of range (%d .. %d)\n",
@@ -2459,8 +2459,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
        }
 
        if (!sport->enabled) {
-               rej->reason = __constant_cpu_to_be32(
-                            SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+               rej->reason = cpu_to_be32(
+                             SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
                ret = -EINVAL;
                pr_err("rejected SRP_LOGIN_REQ because the target port"
                       " has not yet been enabled\n");
@@ -2505,8 +2505,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
        if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
            || *(__be64 *)(req->target_port_id + 8) !=
               cpu_to_be64(srpt_service_guid)) {
-               rej->reason = __constant_cpu_to_be32(
-                               SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
+               rej->reason = cpu_to_be32(
+                             SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
                ret = -ENOMEM;
                pr_err("rejected SRP_LOGIN_REQ because it"
                       " has an invalid target port identifier.\n");
@@ -2515,8 +2515,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 
        ch = kzalloc(sizeof *ch, GFP_KERNEL);
        if (!ch) {
-               rej->reason = __constant_cpu_to_be32(
-                                       SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+               rej->reason = cpu_to_be32(
+                             SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
                pr_err("rejected SRP_LOGIN_REQ because no memory.\n");
                ret = -ENOMEM;
                goto reject;
@@ -2552,8 +2552,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 
        ret = srpt_create_ch_ib(ch);
        if (ret) {
-               rej->reason = __constant_cpu_to_be32(
-                               SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+               rej->reason = cpu_to_be32(
+                             SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
                pr_err("rejected SRP_LOGIN_REQ because creating"
                       " a new RDMA channel failed.\n");
                goto free_ring;
@@ -2561,8 +2561,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
 
        ret = srpt_ch_qp_rtr(ch, ch->qp);
        if (ret) {
-               rej->reason = __constant_cpu_to_be32(
-                               SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+               rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
                pr_err("rejected SRP_LOGIN_REQ because enabling"
                       " RTR failed (error code = %d)\n", ret);
                goto destroy_ib;
@@ -2580,15 +2579,15 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
        if (!nacl) {
                pr_info("Rejected login because no ACL has been"
                        " configured yet for initiator %s.\n", ch->sess_name);
-               rej->reason = __constant_cpu_to_be32(
-                               SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
+               rej->reason = cpu_to_be32(
+                             SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
                goto destroy_ib;
        }
 
        ch->sess = transport_init_session(TARGET_PROT_NORMAL);
        if (IS_ERR(ch->sess)) {
-               rej->reason = __constant_cpu_to_be32(
-                               SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+               rej->reason = cpu_to_be32(
+                             SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
                pr_debug("Failed to create session\n");
                goto deregister_session;
        }
@@ -2604,8 +2603,8 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
        rsp->max_it_iu_len = req->req_it_iu_len;
        rsp->max_ti_iu_len = req->req_it_iu_len;
        ch->max_ti_iu_len = it_iu_len;
-       rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
-                                             | SRP_BUF_FORMAT_INDIRECT);
+       rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
+                                  | SRP_BUF_FORMAT_INDIRECT);
        rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
        atomic_set(&ch->req_lim, ch->rq_size);
        atomic_set(&ch->req_lim_delta, 0);
@@ -2655,8 +2654,8 @@ free_ch:
 reject:
        rej->opcode = SRP_LOGIN_REJ;
        rej->tag = req->tag;
-       rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
-                                             | SRP_BUF_FORMAT_INDIRECT);
+       rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT
+                                  | SRP_BUF_FORMAT_INDIRECT);
 
        ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
                             (void *)rej, sizeof *rej);
index 074a65ed17bb4d595d2b48f13e0d2e03c2267bc6..766bf26601163c37aebf265160b4d3d0ce09617f 100644 (file)
@@ -71,6 +71,18 @@ static void input_leds_event(struct input_handle *handle, unsigned int type,
 {
 }
 
+static int input_leds_get_count(struct input_dev *dev)
+{
+       unsigned int led_code;
+       int count = 0;
+
+       for_each_set_bit(led_code, dev->ledbit, LED_CNT)
+               if (input_led_info[led_code].name)
+                       count++;
+
+       return count;
+}
+
 static int input_leds_connect(struct input_handler *handler,
                              struct input_dev *dev,
                              const struct input_device_id *id)
@@ -81,7 +93,7 @@ static int input_leds_connect(struct input_handler *handler,
        int led_no;
        int error;
 
-       num_leds = bitmap_weight(dev->ledbit, LED_CNT);
+       num_leds = input_leds_get_count(dev);
        if (!num_leds)
                return -ENXIO;
 
@@ -112,7 +124,7 @@ static int input_leds_connect(struct input_handler *handler,
                led->handle = &leds->handle;
                led->code = led_code;
 
-               if (WARN_ON(!input_led_info[led_code].name))
+               if (!input_led_info[led_code].name)
                        continue;
 
                led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
index b10709f0461559c4d5ca03f47ae1d82c75663e91..30e3442518f85cfe06732e0f331962458396008c 100644 (file)
@@ -2,6 +2,7 @@
  * Apple USB BCM5974 (Macbook Air and Penryn Macbook Pro) multitouch driver
  *
  * Copyright (C) 2008     Henrik Rydberg (rydberg@euromail.se)
+ * Copyright (C) 2015      John Horan (knasher@gmail.com)
  *
  * The USB initialization and package decoding was made by
  * Scott Shawcroft as part of the touchd user-space driver project:
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI   0x0290
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO    0x0291
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS    0x0292
+/* MacbookPro12,1 (2015) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI   0x0272
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO    0x0273
+#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS    0x0274
 
 #define BCM5974_DEVICE(prod) {                                 \
        .match_flags = (USB_DEVICE_ID_MATCH_DEVICE |            \
@@ -152,6 +157,10 @@ static const struct usb_device_id bcm5974_table[] = {
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI),
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ISO),
        BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
+       /* MacbookPro12,1 */
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
+       BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
        /* Terminating entry */
        {}
 };
@@ -180,21 +189,47 @@ struct bt_data {
 enum tp_type {
        TYPE1,                  /* plain trackpad */
        TYPE2,                  /* button integrated in trackpad */
-       TYPE3                   /* additional header fields since June 2013 */
+       TYPE3,                  /* additional header fields since June 2013 */
+       TYPE4                   /* additional header field for pressure data */
 };
 
 /* trackpad finger data offsets, le16-aligned */
-#define FINGER_TYPE1           (13 * sizeof(__le16))
-#define FINGER_TYPE2           (15 * sizeof(__le16))
-#define FINGER_TYPE3           (19 * sizeof(__le16))
+#define HEADER_TYPE1           (13 * sizeof(__le16))
+#define HEADER_TYPE2           (15 * sizeof(__le16))
+#define HEADER_TYPE3           (19 * sizeof(__le16))
+#define HEADER_TYPE4           (23 * sizeof(__le16))
 
 /* trackpad button data offsets */
+#define BUTTON_TYPE1           0
 #define BUTTON_TYPE2           15
 #define BUTTON_TYPE3           23
+#define BUTTON_TYPE4           31
 
 /* list of device capability bits */
 #define HAS_INTEGRATED_BUTTON  1
 
+/* trackpad finger data block size */
+#define FSIZE_TYPE1            (14 * sizeof(__le16))
+#define FSIZE_TYPE2            (14 * sizeof(__le16))
+#define FSIZE_TYPE3            (14 * sizeof(__le16))
+#define FSIZE_TYPE4            (15 * sizeof(__le16))
+
+/* offset from header to finger struct */
+#define DELTA_TYPE1            (0 * sizeof(__le16))
+#define DELTA_TYPE2            (0 * sizeof(__le16))
+#define DELTA_TYPE3            (0 * sizeof(__le16))
+#define DELTA_TYPE4            (1 * sizeof(__le16))
+
+/* usb control message mode switch data */
+#define USBMSG_TYPE1           8, 0x300, 0, 0, 0x1, 0x8
+#define USBMSG_TYPE2           8, 0x300, 0, 0, 0x1, 0x8
+#define USBMSG_TYPE3           8, 0x300, 0, 0, 0x1, 0x8
+#define USBMSG_TYPE4           2, 0x302, 2, 1, 0x1, 0x0
+
+/* Wellspring initialization constants */
+#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID                1
+#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID       9
+
 /* trackpad finger structure, le16-aligned */
 struct tp_finger {
        __le16 origin;          /* zero when switching track finger */
@@ -207,14 +242,13 @@ struct tp_finger {
        __le16 orientation;     /* 16384 when point, else 15 bit angle */
        __le16 touch_major;     /* touch area, major axis */
        __le16 touch_minor;     /* touch area, minor axis */
-       __le16 unused[3];       /* zeros */
+       __le16 unused[2];       /* zeros */
+       __le16 pressure;        /* pressure on forcetouch touchpad */
        __le16 multi;           /* one finger: varies, more fingers: constant */
 } __attribute__((packed,aligned(2)));
 
 /* trackpad finger data size, empirically at least ten fingers */
 #define MAX_FINGERS            16
-#define SIZEOF_FINGER          sizeof(struct tp_finger)
-#define SIZEOF_ALL_FINGERS     (MAX_FINGERS * SIZEOF_FINGER)
 #define MAX_FINGER_ORIENTATION 16384
 
 /* device-specific parameters */
@@ -232,8 +266,17 @@ struct bcm5974_config {
        int bt_datalen;         /* data length of the button interface */
        int tp_ep;              /* the endpoint of the trackpad interface */
        enum tp_type tp_type;   /* type of trackpad interface */
-       int tp_offset;          /* offset to trackpad finger data */
+       int tp_header;          /* bytes in header block */
        int tp_datalen;         /* data length of the trackpad interface */
+       int tp_button;          /* offset to button data */
+       int tp_fsize;           /* bytes in single finger block */
+       int tp_delta;           /* offset from header to finger struct */
+       int um_size;            /* usb control message length */
+       int um_req_val;         /* usb control message value */
+       int um_req_idx;         /* usb control message index */
+       int um_switch_idx;      /* usb control message mode switch index */
+       int um_switch_on;       /* usb control message mode switch on */
+       int um_switch_off;      /* usb control message mode switch off */
        struct bcm5974_param p; /* finger pressure limits */
        struct bcm5974_param w; /* finger width limits */
        struct bcm5974_param x; /* horizontal limits */
@@ -259,6 +302,24 @@ struct bcm5974 {
        int slots[MAX_FINGERS];                         /* slot assignments */
 };
 
+/* trackpad finger block data, le16-aligned */
+static const struct tp_finger *get_tp_finger(const struct bcm5974 *dev, int i)
+{
+       const struct bcm5974_config *c = &dev->cfg;
+       u8 *f_base = dev->tp_data + c->tp_header + c->tp_delta;
+
+       return (const struct tp_finger *)(f_base + i * c->tp_fsize);
+}
+
+#define DATAFORMAT(type)                               \
+       type,                                           \
+       HEADER_##type,                                  \
+       HEADER_##type + (MAX_FINGERS) * (FSIZE_##type), \
+       BUTTON_##type,                                  \
+       FSIZE_##type,                                   \
+       DELTA_##type,                                   \
+       USBMSG_##type
+
 /* logical signal quality */
 #define SN_PRESSURE    45              /* pressure signal-to-noise ratio */
 #define SN_WIDTH       25              /* width signal-to-noise ratio */
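
The DATAFORMAT() macro fills the expanded bcm5974_config positionally, in the same order as the new fields (tp_type, tp_header, tp_datalen, tp_button, tp_fsize, tp_delta, then the six USB control-message parameters packed into USBMSG_*). Expanding it by hand for the new force-touch type makes the mapping concrete:

/* DATAFORMAT(TYPE4) expands, field by field, to:
 *   TYPE4,                     tp_type
 *   46,                        tp_header   = HEADER_TYPE4 (23 * 2 bytes)
 *   526,                       tp_datalen  = header + 16 fingers * 30 bytes
 *   31,                        tp_button   = BUTTON_TYPE4
 *   30,                        tp_fsize    = FSIZE_TYPE4 (15 * 2 bytes)
 *   2,                         tp_delta    = DELTA_TYPE4 (1 * 2 bytes)
 *   2, 0x302, 2, 1, 0x1, 0x0   um_size .. um_switch_off (USBMSG_TYPE4)
 */

get_tp_finger() then locates finger block i at tp_header + tp_delta + i * tp_fsize, which is how the extra leading word in TYPE4 reports is skipped without disturbing the older formats.
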
@@ -273,7 +334,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING_JIS,
                0,
                0x84, sizeof(struct bt_data),
-               0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS,
+               0x81, DATAFORMAT(TYPE1),
                { SN_PRESSURE, 0, 256 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4824, 5342 },
@@ -286,7 +347,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING2_JIS,
                0,
                0x84, sizeof(struct bt_data),
-               0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS,
+               0x81, DATAFORMAT(TYPE1),
                { SN_PRESSURE, 0, 256 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4824, 4824 },
@@ -299,7 +360,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING3_JIS,
                HAS_INTEGRATED_BUTTON,
                0x84, sizeof(struct bt_data),
-               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               0x81, DATAFORMAT(TYPE2),
                { SN_PRESSURE, 0, 300 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4460, 5166 },
@@ -312,7 +373,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING4_JIS,
                HAS_INTEGRATED_BUTTON,
                0x84, sizeof(struct bt_data),
-               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               0x81, DATAFORMAT(TYPE2),
                { SN_PRESSURE, 0, 300 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4620, 5140 },
@@ -325,7 +386,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS,
                HAS_INTEGRATED_BUTTON,
                0x84, sizeof(struct bt_data),
-               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               0x81, DATAFORMAT(TYPE2),
                { SN_PRESSURE, 0, 300 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4616, 5112 },
@@ -338,7 +399,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING5_JIS,
                HAS_INTEGRATED_BUTTON,
                0x84, sizeof(struct bt_data),
-               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               0x81, DATAFORMAT(TYPE2),
                { SN_PRESSURE, 0, 300 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4415, 5050 },
@@ -351,7 +412,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING6_JIS,
                HAS_INTEGRATED_BUTTON,
                0x84, sizeof(struct bt_data),
-               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               0x81, DATAFORMAT(TYPE2),
                { SN_PRESSURE, 0, 300 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4620, 5140 },
@@ -364,7 +425,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS,
                HAS_INTEGRATED_BUTTON,
                0x84, sizeof(struct bt_data),
-               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               0x81, DATAFORMAT(TYPE2),
                { SN_PRESSURE, 0, 300 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4750, 5280 },
@@ -377,7 +438,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS,
                HAS_INTEGRATED_BUTTON,
                0x84, sizeof(struct bt_data),
-               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               0x81, DATAFORMAT(TYPE2),
                { SN_PRESSURE, 0, 300 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4620, 5140 },
@@ -390,7 +451,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING7_JIS,
                HAS_INTEGRATED_BUTTON,
                0x84, sizeof(struct bt_data),
-               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               0x81, DATAFORMAT(TYPE2),
                { SN_PRESSURE, 0, 300 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4750, 5280 },
@@ -403,7 +464,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS,
                HAS_INTEGRATED_BUTTON,
                0x84, sizeof(struct bt_data),
-               0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+               0x81, DATAFORMAT(TYPE2),
                { SN_PRESSURE, 0, 300 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4750, 5280 },
@@ -416,13 +477,26 @@ static const struct bcm5974_config bcm5974_config_table[] = {
                USB_DEVICE_ID_APPLE_WELLSPRING8_JIS,
                HAS_INTEGRATED_BUTTON,
                0, sizeof(struct bt_data),
-               0x83, TYPE3, FINGER_TYPE3, FINGER_TYPE3 + SIZEOF_ALL_FINGERS,
+               0x83, DATAFORMAT(TYPE3),
                { SN_PRESSURE, 0, 300 },
                { SN_WIDTH, 0, 2048 },
                { SN_COORD, -4620, 5140 },
                { SN_COORD, -150, 6600 },
                { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
        },
+       {
+               USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI,
+               USB_DEVICE_ID_APPLE_WELLSPRING9_ISO,
+               USB_DEVICE_ID_APPLE_WELLSPRING9_JIS,
+               HAS_INTEGRATED_BUTTON,
+               0, sizeof(struct bt_data),
+               0x83, DATAFORMAT(TYPE4),
+               { SN_PRESSURE, 0, 300 },
+               { SN_WIDTH, 0, 2048 },
+               { SN_COORD, -4828, 5345 },
+               { SN_COORD, -203, 6803 },
+               { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
+       },
        {}
 };
 
@@ -549,19 +623,18 @@ static int report_tp_state(struct bcm5974 *dev, int size)
        struct input_dev *input = dev->input;
        int raw_n, i, n = 0;
 
-       if (size < c->tp_offset || (size - c->tp_offset) % SIZEOF_FINGER != 0)
+       if (size < c->tp_header || (size - c->tp_header) % c->tp_fsize != 0)
                return -EIO;
 
-       /* finger data, le16-aligned */
-       f = (const struct tp_finger *)(dev->tp_data + c->tp_offset);
-       raw_n = (size - c->tp_offset) / SIZEOF_FINGER;
+       raw_n = (size - c->tp_header) / c->tp_fsize;
 
        for (i = 0; i < raw_n; i++) {
-               if (raw2int(f[i].touch_major) == 0)
+               f = get_tp_finger(dev, i);
+               if (raw2int(f->touch_major) == 0)
                        continue;
-               dev->pos[n].x = raw2int(f[i].abs_x);
-               dev->pos[n].y = c->y.min + c->y.max - raw2int(f[i].abs_y);
-               dev->index[n++] = &f[i];
+               dev->pos[n].x = raw2int(f->abs_x);
+               dev->pos[n].y = c->y.min + c->y.max - raw2int(f->abs_y);
+               dev->index[n++] = f;
        }
 
        input_mt_assign_slots(input, dev->slots, dev->pos, n, 0);
@@ -572,32 +645,22 @@ static int report_tp_state(struct bcm5974 *dev, int size)
 
        input_mt_sync_frame(input);
 
-       report_synaptics_data(input, c, f, raw_n);
+       report_synaptics_data(input, c, get_tp_finger(dev, 0), raw_n);
 
-       /* type 2 reports button events via ibt only */
-       if (c->tp_type == TYPE2) {
-               int ibt = raw2int(dev->tp_data[BUTTON_TYPE2]);
+       /* later types report button events via integrated button only */
+       if (c->caps & HAS_INTEGRATED_BUTTON) {
+               int ibt = raw2int(dev->tp_data[c->tp_button]);
                input_report_key(input, BTN_LEFT, ibt);
        }
 
-       if (c->tp_type == TYPE3)
-               input_report_key(input, BTN_LEFT, dev->tp_data[BUTTON_TYPE3]);
-
        input_sync(input);
 
        return 0;
 }
 
-/* Wellspring initialization constants */
-#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID                1
-#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID       9
-#define BCM5974_WELLSPRING_MODE_REQUEST_VALUE          0x300
-#define BCM5974_WELLSPRING_MODE_REQUEST_INDEX          0
-#define BCM5974_WELLSPRING_MODE_VENDOR_VALUE           0x01
-#define BCM5974_WELLSPRING_MODE_NORMAL_VALUE           0x08
-
 static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
 {
+       const struct bcm5974_config *c = &dev->cfg;
        int retval = 0, size;
        char *data;
 
@@ -605,7 +668,7 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
        if (dev->cfg.tp_type == TYPE3)
                return 0;
 
-       data = kmalloc(8, GFP_KERNEL);
+       data = kmalloc(c->um_size, GFP_KERNEL);
        if (!data) {
                dev_err(&dev->intf->dev, "out of memory\n");
                retval = -ENOMEM;
@@ -616,28 +679,24 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
        size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
                        BCM5974_WELLSPRING_MODE_READ_REQUEST_ID,
                        USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
-                       BCM5974_WELLSPRING_MODE_REQUEST_VALUE,
-                       BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
+                       c->um_req_val, c->um_req_idx, data, c->um_size, 5000);
 
-       if (size != 8) {
+       if (size != c->um_size) {
                dev_err(&dev->intf->dev, "could not read from device\n");
                retval = -EIO;
                goto out;
        }
 
        /* apply the mode switch */
-       data[0] = on ?
-               BCM5974_WELLSPRING_MODE_VENDOR_VALUE :
-               BCM5974_WELLSPRING_MODE_NORMAL_VALUE;
+       data[c->um_switch_idx] = on ? c->um_switch_on : c->um_switch_off;
 
        /* write configuration */
        size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
                        BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID,
                        USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
-                       BCM5974_WELLSPRING_MODE_REQUEST_VALUE,
-                       BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
+                       c->um_req_val, c->um_req_idx, data, c->um_size, 5000);
 
-       if (size != 8) {
+       if (size != c->um_size) {
                dev_err(&dev->intf->dev, "could not write to device\n");
                retval = -EIO;
                goto out;
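
The hunk above folds the fixed Wellspring constants into per-model config fields (um_size, um_req_val, um_req_idx, um_switch_idx, um_switch_on/off), so a single read-modify-write routine serves every trackpad generation. A minimal user-space sketch of that sequence — memcpy() stands in for the two usb_control_msg() transfers, and the values in main() are the classic 8-byte Wellspring layout from the deleted defines:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Per-model mode-switch parameters, mirroring the driver's config. */
struct um_config {
	size_t um_size;               /* length of the mode block */
	size_t um_switch_idx;         /* which byte selects the mode */
	unsigned char um_switch_on;   /* value for raw/"wellspring" mode */
	unsigned char um_switch_off;  /* value for normal HID mode */
};

/* Read the current mode block, patch one byte, write it back. */
static int wellspring_mode(unsigned char *device, const struct um_config *c,
			   int on)
{
	unsigned char *data = malloc(c->um_size);

	if (!data)
		return -1;
	memcpy(data, device, c->um_size);   /* stands in for the read transfer */
	data[c->um_switch_idx] = on ? c->um_switch_on : c->um_switch_off;
	memcpy(device, data, c->um_size);   /* stands in for the write transfer */
	free(data);
	return 0;
}

int main(void)
{
	struct um_config c = { 8, 0, 0x01, 0x08 };  /* classic Wellspring values */
	unsigned char device[8] = { 0x08 };

	wellspring_mode(device, &c, 1);
	printf("mode byte now %#x\n", device[0]);   /* prints 0x1 */
	return 0;
}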
index ce3d40004458c87392339472f654462fae7cf0bc..22b9ca901f4e96c22499ce9723c0d2bd897c6fbb 100644 (file)
@@ -1167,7 +1167,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
        struct input_dev *dev = psmouse->dev;
        struct elantech_data *etd = psmouse->private;
        unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0;
-       unsigned int x_res = 0, y_res = 0;
+       unsigned int x_res = 31, y_res = 31;
 
        if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width))
                return -1;
@@ -1232,8 +1232,6 @@ static int elantech_set_input_params(struct psmouse *psmouse)
                /* For X to recognize me as touchpad. */
                input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
                input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
-               input_abs_set_res(dev, ABS_X, x_res);
-               input_abs_set_res(dev, ABS_Y, y_res);
                /*
                 * range of pressure and width is the same as v2,
                 * report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility.
@@ -1246,8 +1244,6 @@ static int elantech_set_input_params(struct psmouse *psmouse)
                input_mt_init_slots(dev, ETP_MAX_FINGERS, 0);
                input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
                input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
-               input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
-               input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
                input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2,
                                     ETP_PMAX_V2, 0, 0);
                /*
@@ -1259,6 +1255,13 @@ static int elantech_set_input_params(struct psmouse *psmouse)
                break;
        }
 
+       input_abs_set_res(dev, ABS_X, x_res);
+       input_abs_set_res(dev, ABS_Y, y_res);
+       if (etd->hw_version > 1) {
+               input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
+               input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
+       }
+
        etd->y_max = y_max;
        etd->width = width;
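
The elantech hunk replaces resolution values that could remain 0 with a default of 31 units/mm and applies them once after the version switch, extending them to the MT axes only when hw_version > 1. A small sketch of that fallback, with set_res() standing in for input_abs_set_res():

#include <stdio.h>

static void set_res(const char *axis, unsigned int res)
{
	printf("%s: %u units/mm\n", axis, res);
}

int main(void)
{
	unsigned int x_res = 31, y_res = 31;	/* defaults, no longer 0 */
	int hw_version = 2;

	/* ... per-version axis ranges would be configured here ... */

	set_res("ABS_X", x_res);
	set_res("ABS_Y", y_res);
	if (hw_version > 1) {			/* MT axes exist from v2 on */
		set_res("ABS_MT_POSITION_X", x_res);
		set_res("ABS_MT_POSITION_Y", y_res);
	}
	return 0;
}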
 
index 3a32caf06bf1dae9b9fb678947bb9c7243fc25da..6025eb430c0a5010c908961ccf8897943fd3c945 100644 (file)
@@ -1484,12 +1484,12 @@ static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
        priv->pkt_type = SYN_MODEL_NEWABS(priv->model_id) ? SYN_NEWABS : SYN_OLDABS;
 
        psmouse_info(psmouse,
-                    "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx, board id: %lu, fw id: %lu\n",
+                    "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx/%#lx, board id: %lu, fw id: %lu\n",
                     SYN_ID_MODEL(priv->identity),
                     SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity),
                     priv->model_id,
                     priv->capabilities, priv->ext_cap, priv->ext_cap_0c,
-                    priv->board_id, priv->firmware_id);
+                    priv->ext_cap_10, priv->board_id, priv->firmware_id);
 
        set_input_params(psmouse, priv);
 
index b4d12e29abff72008125c2406b3808cf174e0f71..e36162b28c2aae268166c2e735e318b793b9dfa8 100644 (file)
@@ -15,6 +15,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/dmi.h>
 #include <linux/i2c.h>
 #include <linux/input.h>
 #include <linux/input/mt.h>
@@ -34,6 +35,7 @@ struct goodix_ts_data {
        int abs_y_max;
        unsigned int max_touch_num;
        unsigned int int_trigger_type;
+       bool rotated_screen;
 };
 
 #define GOODIX_MAX_HEIGHT              4096
@@ -60,6 +62,30 @@ static const unsigned long goodix_irq_flags[] = {
        IRQ_TYPE_LEVEL_HIGH,
 };
 
+/*
+ * These tablets have their coordinate origin at the bottom right
+ * of the tablet, as if the screen were rotated 180 degrees.
+ */
+static const struct dmi_system_id rotated_screen[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+       {
+               .ident = "WinBook TW100",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
+               }
+       },
+       {
+               .ident = "WinBook TW700",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
+               },
+       },
+#endif
+       {}
+};
+
 /**
  * goodix_i2c_read - read data from a register of the i2c slave device.
  *
@@ -129,6 +155,11 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
        int input_y = get_unaligned_le16(&coor_data[3]);
        int input_w = get_unaligned_le16(&coor_data[5]);
 
+       if (ts->rotated_screen) {
+               input_x = ts->abs_x_max - input_x;
+               input_y = ts->abs_y_max - input_y;
+       }
+
        input_mt_slot(ts->input_dev, id);
        input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true);
        input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x);
@@ -223,6 +254,11 @@ static void goodix_read_config(struct goodix_ts_data *ts)
                ts->abs_y_max = GOODIX_MAX_HEIGHT;
                ts->max_touch_num = GOODIX_MAX_CONTACTS;
        }
+
+       ts->rotated_screen = dmi_check_system(rotated_screen);
+       if (ts->rotated_screen)
+               dev_dbg(&ts->client->dev,
+                        "Applying '180 degrees rotated screen' quirk\n");
 }
 
 /**
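
With the origin in the bottom-right corner, a reported touch at (x, y) corresponds to (abs_x_max - x, abs_y_max - y) in the expected frame, which is exactly the transform goodix_ts_report_touch() now applies when the DMI quirk matches. A self-contained sketch (panel maxima illustrative):

#include <stdio.h>

struct point { int x, y; };

/* Mirror both axes around the panel's maxima: the effect of the
 * 180-degrees-rotated-screen quirk. */
static struct point unrotate(struct point p, int x_max, int y_max)
{
	struct point out = { x_max - p.x, y_max - p.y };
	return out;
}

int main(void)
{
	struct point raw = { 100, 200 };
	struct point fixed = unrotate(raw, 4096, 4096);

	printf("(%d, %d) -> (%d, %d)\n", raw.x, raw.y, fixed.x, fixed.y);
	return 0;	/* (100, 200) -> (3996, 3896) */
}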
index f2c6c352c55af2d3bb4dbf243516b19b9c62a65a..2c41107240dec274e5ec34724a882d87189f0103 100644 (file)
@@ -627,6 +627,9 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
                goto err_out;
        }
 
+       /* TSC-25 data sheet specifies a delay after the RESET command */
+       msleep(150);
+
        /* set coordinate output rate */
        buf[0] = buf[1] = 0xFF;
        ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
index a57e9b7498953bb9ebf947695caea46d73e2bfeb..658ee39e65696898422bcd9c825d8a49fbc37359 100644 (file)
@@ -76,8 +76,6 @@ LIST_HEAD(hpet_map);
  * Domain for untranslated devices - only allocated
  * if iommu=pt passed on kernel cmd line.
  */
-static struct protection_domain *pt_domain;
-
 static const struct iommu_ops amd_iommu_ops;
 
 static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
@@ -96,7 +94,7 @@ struct iommu_dev_data {
        struct protection_domain *domain; /* Domain the device is bound to */
        u16 devid;                        /* PCI Device ID */
        bool iommu_v2;                    /* Device can make use of IOMMUv2 */
-       bool passthrough;                 /* Default for device is pt_domain */
+       bool passthrough;                 /* Device is identity mapped */
        struct {
                bool enabled;
                int qdep;
@@ -116,7 +114,6 @@ struct iommu_cmd {
 struct kmem_cache *amd_iommu_irq_cache;
 
 static void update_domain(struct protection_domain *domain);
-static int alloc_passthrough_domain(void);
 static int protection_domain_init(struct protection_domain *domain);
 
 /****************************************************************************
@@ -2167,15 +2164,17 @@ static int attach_device(struct device *dev,
        dev_data = get_dev_data(dev);
 
        if (domain->flags & PD_IOMMUV2_MASK) {
-               if (!dev_data->iommu_v2 || !dev_data->passthrough)
+               if (!dev_data->passthrough)
                        return -EINVAL;
 
-               if (pdev_iommuv2_enable(pdev) != 0)
-                       return -EINVAL;
+               if (dev_data->iommu_v2) {
+                       if (pdev_iommuv2_enable(pdev) != 0)
+                               return -EINVAL;
 
-               dev_data->ats.enabled = true;
-               dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
-               dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
+                       dev_data->ats.enabled = true;
+                       dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
+                       dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
+               }
        } else if (amd_iommu_iotlb_sup &&
                   pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
                dev_data->ats.enabled = true;
@@ -2221,15 +2220,6 @@ static void __detach_device(struct iommu_dev_data *dev_data)
        do_detach(head);
 
        spin_unlock_irqrestore(&domain->lock, flags);
-
-       /*
-        * If we run in passthrough mode the device must be assigned to the
-        * passthrough domain if it is detached from any other domain.
-        * Make sure we can deassign from the pt_domain itself.
-        */
-       if (dev_data->passthrough &&
-           (dev_data->domain == NULL && domain != pt_domain))
-               __attach_device(dev_data, pt_domain);
 }
 
 /*
@@ -2249,7 +2239,7 @@ static void detach_device(struct device *dev)
        __detach_device(dev_data);
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
-       if (domain->flags & PD_IOMMUV2_MASK)
+       if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
                pdev_iommuv2_disable(to_pci_dev(dev));
        else if (dev_data->ats.enabled)
                pci_disable_ats(to_pci_dev(dev));
@@ -2287,17 +2277,15 @@ static int amd_iommu_add_device(struct device *dev)
 
        BUG_ON(!dev_data);
 
-       if (dev_data->iommu_v2)
+       if (iommu_pass_through || dev_data->iommu_v2)
                iommu_request_dm_for_dev(dev);
 
        /* Domains are initialized for this device - have a look what we ended up with */
        domain = iommu_get_domain_for_dev(dev);
-       if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+       if (domain->type == IOMMU_DOMAIN_IDENTITY)
                dev_data->passthrough = true;
-               dev->archdata.dma_ops = &nommu_dma_ops;
-       } else {
+       else
                dev->archdata.dma_ops = &amd_iommu_dma_ops;
-       }
 
 out:
        iommu_completion_wait(iommu);
@@ -2862,8 +2850,17 @@ int __init amd_iommu_init_api(void)
 
 int __init amd_iommu_init_dma_ops(void)
 {
+       swiotlb        = iommu_pass_through ? 1 : 0;
        iommu_detected = 1;
-       swiotlb = 0;
+
+       /*
+        * If we don't initialize SWIOTLB (actually the common case
+        * when the AMD IOMMU is enabled), make sure global dma_ops
+        * are set as a fallback for devices not handled by this
+        * driver (for example non-PCI devices).
+        */
+       if (!swiotlb)
+               dma_ops = &nommu_dma_ops;
 
        amd_iommu_stats_init();
 
@@ -2947,21 +2944,6 @@ out_err:
        return NULL;
 }
 
-static int alloc_passthrough_domain(void)
-{
-       if (pt_domain != NULL)
-               return 0;
-
-       /* allocate passthrough domain */
-       pt_domain = protection_domain_alloc();
-       if (!pt_domain)
-               return -ENOMEM;
-
-       pt_domain->mode = PAGE_MODE_NONE;
-
-       return 0;
-}
-
 static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
 {
        struct protection_domain *pdomain;
@@ -3222,33 +3204,6 @@ static const struct iommu_ops amd_iommu_ops = {
  *
  *****************************************************************************/
 
-int __init amd_iommu_init_passthrough(void)
-{
-       struct iommu_dev_data *dev_data;
-       struct pci_dev *dev = NULL;
-       int ret;
-
-       ret = alloc_passthrough_domain();
-       if (ret)
-               return ret;
-
-       for_each_pci_dev(dev) {
-               if (!check_device(&dev->dev))
-                       continue;
-
-               dev_data = get_dev_data(&dev->dev);
-               dev_data->passthrough = true;
-
-               attach_device(&dev->dev, pt_domain);
-       }
-
-       amd_iommu_stats_init();
-
-       pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
-
-       return 0;
-}
-
 /* IOMMUv2 specific functions */
 int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
 {
@@ -3363,7 +3318,12 @@ static int __flush_pasid(struct protection_domain *domain, int pasid,
                struct amd_iommu *iommu;
                int qdep;
 
-               BUG_ON(!dev_data->ats.enabled);
+               /*
+                * There might be non-IOMMUv2-capable devices in an IOMMUv2
+                * domain.
+                */
+               if (!dev_data->ats.enabled)
+                       continue;
 
                qdep  = dev_data->ats.qdep;
                iommu = amd_iommu_rlookup_table[dev_data->devid];
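
Taken together, the amd_iommu.c hunks retire the dedicated pt_domain and allow an IOMMUv2 domain to hold devices without IOMMUv2 support: such devices must simply be identity mapped, and the ATS/PRI setup, teardown, and the __flush_pasid() walk are restricted to the v2-capable ones. A condensed sketch of the new attach decision, with booleans standing in for the kernel types:

#include <stdio.h>

struct dev_data {
	int passthrough;	/* device is identity mapped */
	int iommu_v2;		/* device supports IOMMUv2 (ATS/PRI/PASID) */
};

/* Returns 0 on success, -1 (an EINVAL stand-in) on refusal. */
static int attach_v2_domain(struct dev_data *d)
{
	if (!d->passthrough)	/* v2 domains need identity-mapped devices */
		return -1;
	if (d->iommu_v2) {
		/* only v2-capable devices get ATS/PRI enabled */
		printf("enabling ATS/PRI\n");
	}
	return 0;		/* non-v2 devices attach without ATS/PRI */
}

int main(void)
{
	struct dev_data v2 = { 1, 1 }, legacy = { 1, 0 };

	printf("v2 device: %d\n", attach_v2_domain(&v2));
	printf("legacy device: %d\n", attach_v2_domain(&legacy));
	return 0;
}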
index dbda9ae68c5d70fff7926e39a2ef506b7165bf65..a24495eb4e26c5c596efa79c084b14c19fe5932c 100644 (file)
@@ -2026,14 +2026,6 @@ static bool detect_ivrs(void)
        return true;
 }
 
-static int amd_iommu_init_dma(void)
-{
-       if (iommu_pass_through)
-               return amd_iommu_init_passthrough();
-       else
-               return amd_iommu_init_dma_ops();
-}
-
 /****************************************************************************
  *
  * AMD IOMMU Initialization State Machine
@@ -2073,7 +2065,7 @@ static int __init state_next(void)
                init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
                break;
        case IOMMU_INTERRUPTS_EN:
-               ret = amd_iommu_init_dma();
+               ret = amd_iommu_init_dma_ops();
                init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
                break;
        case IOMMU_DMA_OPS:
index 3465faf1809e4cb1d6630e5cdc8f87cd4e405bd2..f7b875bb70d42138027f49ebde8150d27ce14cd2 100644 (file)
@@ -132,11 +132,19 @@ static struct device_state *get_device_state(u16 devid)
 
 static void free_device_state(struct device_state *dev_state)
 {
+       struct iommu_group *group;
+
        /*
         * First detach device from domain - No more PRI requests will arrive
         * from that device after it is unbound from the IOMMUv2 domain.
         */
-       iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);
+       group = iommu_group_get(&dev_state->pdev->dev);
+       if (WARN_ON(!group))
+               return;
+
+       iommu_detach_group(dev_state->domain, group);
+
+       iommu_group_put(group);
 
        /* Everything is down now, free the IOMMUv2 domain */
        iommu_domain_free(dev_state->domain);
@@ -731,6 +739,7 @@ EXPORT_SYMBOL(amd_iommu_unbind_pasid);
 int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 {
        struct device_state *dev_state;
+       struct iommu_group *group;
        unsigned long flags;
        int ret, tmp;
        u16 devid;
@@ -776,10 +785,16 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
        if (ret)
                goto out_free_domain;
 
-       ret = iommu_attach_device(dev_state->domain, &pdev->dev);
-       if (ret != 0)
+       group = iommu_group_get(&pdev->dev);
+       if (!group)
                goto out_free_domain;
 
+       ret = iommu_attach_group(dev_state->domain, group);
+       if (ret != 0)
+               goto out_drop_group;
+
+       iommu_group_put(group);
+
        spin_lock_irqsave(&state_lock, flags);
 
        if (__get_device_state(devid) != NULL) {
@@ -794,6 +809,9 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 
        return 0;
 
+out_drop_group:
+       iommu_group_put(group);
+
 out_free_domain:
        iommu_domain_free(dev_state->domain);
 
index 8e9ec81ce4bbd85473d6d6a35e7c3567569187ee..da902baaa7946aac569b7ebe8a316c647dfd8187 100644 (file)
  * Stream table.
  *
  * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
- * 2lvl: 8k L1 entries, 256 lazy entries per table (each table covers a PCI bus)
+ * 2lvl: 128k L1 entries,
+ *       256 lazy entries per table (each table covers a PCI bus)
  */
-#define STRTAB_L1_SZ_SHIFT             16
+#define STRTAB_L1_SZ_SHIFT             20
 #define STRTAB_SPLIT                   8
 
 #define STRTAB_L1_DESC_DWORDS          1
 #define ARM64_TCR_TG0_SHIFT            14
 #define ARM64_TCR_TG0_MASK             0x3UL
 #define CTXDESC_CD_0_TCR_IRGN0_SHIFT   8
-#define ARM64_TCR_IRGN0_SHIFT          24
+#define ARM64_TCR_IRGN0_SHIFT          8
 #define ARM64_TCR_IRGN0_MASK           0x3UL
 #define CTXDESC_CD_0_TCR_ORGN0_SHIFT   10
-#define ARM64_TCR_ORGN0_SHIFT          26
+#define ARM64_TCR_ORGN0_SHIFT          10
 #define ARM64_TCR_ORGN0_MASK           0x3UL
 #define CTXDESC_CD_0_TCR_SH0_SHIFT     12
 #define ARM64_TCR_SH0_SHIFT            12
@@ -542,6 +543,9 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_HYP              (1 << 12)
        u32                             features;
 
+#define ARM_SMMU_OPT_SKIP_PREFETCH     (1 << 0)
+       u32                             options;
+
        struct arm_smmu_cmdq            cmdq;
        struct arm_smmu_evtq            evtq;
        struct arm_smmu_priq            priq;
@@ -602,11 +606,35 @@ struct arm_smmu_domain {
 static DEFINE_SPINLOCK(arm_smmu_devices_lock);
 static LIST_HEAD(arm_smmu_devices);
 
+struct arm_smmu_option_prop {
+       u32 opt;
+       const char *prop;
+};
+
+static struct arm_smmu_option_prop arm_smmu_options[] = {
+       { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
+       { 0, NULL },
+};
+
 static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
 {
        return container_of(dom, struct arm_smmu_domain, domain);
 }
 
+static void parse_driver_options(struct arm_smmu_device *smmu)
+{
+       int i = 0;
+
+       do {
+               if (of_property_read_bool(smmu->dev->of_node,
+                                               arm_smmu_options[i].prop)) {
+                       smmu->options |= arm_smmu_options[i].opt;
+                       dev_notice(smmu->dev, "option %s\n",
+                               arm_smmu_options[i].prop);
+               }
+       } while (arm_smmu_options[++i].opt);
+}
+
 /* Low-level queue manipulation functions */
 static bool queue_full(struct arm_smmu_queue *q)
 {
@@ -1036,7 +1064,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
        arm_smmu_sync_ste_for_sid(smmu, sid);
 
        /* It's likely that we'll want to use the new STE soon */
-       arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
+       if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
+               arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
 }
 
 static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
@@ -1064,7 +1093,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
                return 0;
 
        size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
-       strtab = &cfg->strtab[sid >> STRTAB_SPLIT << STRTAB_L1_DESC_DWORDS];
+       strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];
 
        desc->span = STRTAB_SPLIT + 1;
        desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
@@ -2020,21 +2049,23 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
 {
        void *strtab;
        u64 reg;
-       u32 size;
+       u32 size, l1size;
        int ret;
        struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
 
        /* Calculate the L1 size, capped to the SIDSIZE */
        size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
        size = min(size, smmu->sid_bits - STRTAB_SPLIT);
-       if (size + STRTAB_SPLIT < smmu->sid_bits)
+       cfg->num_l1_ents = 1 << size;
+
+       size += STRTAB_SPLIT;
+       if (size < smmu->sid_bits)
                dev_warn(smmu->dev,
                         "2-level strtab only covers %u/%u bits of SID\n",
-                        size + STRTAB_SPLIT, smmu->sid_bits);
+                        size, smmu->sid_bits);
 
-       cfg->num_l1_ents = 1 << size;
-       size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
-       strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma,
+       l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
+       strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
                                     GFP_KERNEL);
        if (!strtab) {
                dev_err(smmu->dev,
@@ -2055,8 +2086,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
        ret = arm_smmu_init_l1_strtab(smmu);
        if (ret)
                dma_free_coherent(smmu->dev,
-                                 cfg->num_l1_ents *
-                                 (STRTAB_L1_DESC_DWORDS << 3),
+                                 l1size,
                                  strtab,
                                  cfg->strtab_dma);
        return ret;
@@ -2573,6 +2603,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
        if (irq > 0)
                smmu->gerr_irq = irq;
 
+       parse_driver_options(smmu);
+
        /* Probe the h/w */
        ret = arm_smmu_device_probe(smmu);
        if (ret)
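
parse_driver_options() above is table-driven: it walks a { flag, property } array and ORs a bit into smmu->options for every devicetree property that is present, so adding a quirk needs only one table entry. A user-space analogue, with a NULL-terminated string array standing in for the device node:

#include <stdio.h>
#include <string.h>

struct option_prop {
	unsigned int opt;
	const char *prop;
};

static const struct option_prop options[] = {
	{ 1u << 0, "hisilicon,broken-prefetch-cmd" },
	{ 0, NULL },
};

/* Stand-in for of_property_read_bool(): is prop among the node's
 * properties? */
static int has_prop(const char *const *props, const char *prop)
{
	for (; *props; props++)
		if (!strcmp(*props, prop))
			return 1;
	return 0;
}

static unsigned int parse_options(const char *const *props)
{
	unsigned int opts = 0;
	int i = 0;

	do {
		if (has_prop(props, options[i].prop))
			opts |= options[i].opt;
	} while (options[++i].opt);
	return opts;
}

int main(void)
{
	const char *const node[] = { "hisilicon,broken-prefetch-cmd", NULL };

	printf("options: %#x\n", parse_options(node));	/* -> 0x1 */
	return 0;
}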
index a98a7b27aca1dec2cb2f53319df8a49abcf8e645..0649b94f59584ca5b885cd0ecad595a84af0d89d 100644 (file)
@@ -1830,8 +1830,9 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 
 static void domain_exit(struct dmar_domain *domain)
 {
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
        struct page *freelist = NULL;
-       int i;
 
        /* Domain 0 is reserved, so don't process it */
        if (!domain)
@@ -1851,8 +1852,10 @@ static void domain_exit(struct dmar_domain *domain)
 
        /* clear attached or cached domains */
        rcu_read_lock();
-       for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
-               iommu_detach_domain(domain, g_iommus[i]);
+       for_each_active_iommu(iommu, drhd)
+               if (domain_type_is_vm(domain) ||
+                   test_bit(iommu->seq_id, domain->iommu_bmp))
+                       iommu_detach_domain(domain, iommu);
        rcu_read_unlock();
 
        dma_free_pagelist(freelist);
index 1b7e155869f6c1a5f9ff361f246b6fd71539dadc..c00e2db351ba5aec327ebb1e98085b25b9b4d056 100644 (file)
@@ -75,6 +75,13 @@ struct its_node {
 
 #define ITS_ITT_ALIGN          SZ_256
 
+struct event_lpi_map {
+       unsigned long           *lpi_map;
+       u16                     *col_map;
+       irq_hw_number_t         lpi_base;
+       int                     nr_lpis;
+};
+
 /*
  * The ITS view of a device - belongs to an ITS, a collection, owns an
  * interrupt translation table, and a list of interrupts.
@@ -82,11 +89,8 @@ struct its_node {
 struct its_device {
        struct list_head        entry;
        struct its_node         *its;
-       struct its_collection   *collection;
+       struct event_lpi_map    event_map;
        void                    *itt;
-       unsigned long           *lpi_map;
-       irq_hw_number_t         lpi_base;
-       int                     nr_lpis;
        u32                     nr_ites;
        u32                     device_id;
 };
@@ -99,6 +103,14 @@ static struct rdists *gic_rdists;
 #define gic_data_rdist()               (raw_cpu_ptr(gic_rdists->rdist))
 #define gic_data_rdist_rd_base()       (gic_data_rdist()->rd_base)
 
+static struct its_collection *dev_event_to_col(struct its_device *its_dev,
+                                              u32 event)
+{
+       struct its_node *its = its_dev->its;
+
+       return its->collections + its_dev->event_map.col_map[event];
+}
+
 /*
  * ITS command descriptors - parameters to be encoded in a command
  * block.
@@ -134,7 +146,7 @@ struct its_cmd_desc {
                struct {
                        struct its_device *dev;
                        struct its_collection *col;
-                       u32 id;
+                       u32 event_id;
                } its_movi_cmd;
 
                struct {
@@ -241,7 +253,7 @@ static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
 
        its_fixup_cmd(cmd);
 
-       return desc->its_mapd_cmd.dev->collection;
+       return NULL;
 }
 
 static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
@@ -260,52 +272,72 @@ static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
 static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd,
                                                  struct its_cmd_desc *desc)
 {
+       struct its_collection *col;
+
+       col = dev_event_to_col(desc->its_mapvi_cmd.dev,
+                              desc->its_mapvi_cmd.event_id);
+
        its_encode_cmd(cmd, GITS_CMD_MAPVI);
        its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id);
        its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id);
        its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id);
-       its_encode_collection(cmd, desc->its_mapvi_cmd.dev->collection->col_id);
+       its_encode_collection(cmd, col->col_id);
 
        its_fixup_cmd(cmd);
 
-       return desc->its_mapvi_cmd.dev->collection;
+       return col;
 }
 
 static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
                                                 struct its_cmd_desc *desc)
 {
+       struct its_collection *col;
+
+       col = dev_event_to_col(desc->its_movi_cmd.dev,
+                              desc->its_movi_cmd.event_id);
+
        its_encode_cmd(cmd, GITS_CMD_MOVI);
        its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
-       its_encode_event_id(cmd, desc->its_movi_cmd.id);
+       its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
        its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
 
        its_fixup_cmd(cmd);
 
-       return desc->its_movi_cmd.dev->collection;
+       return col;
 }
 
 static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
                                                    struct its_cmd_desc *desc)
 {
+       struct its_collection *col;
+
+       col = dev_event_to_col(desc->its_discard_cmd.dev,
+                              desc->its_discard_cmd.event_id);
+
        its_encode_cmd(cmd, GITS_CMD_DISCARD);
        its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
        its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
 
        its_fixup_cmd(cmd);
 
-       return desc->its_discard_cmd.dev->collection;
+       return col;
 }
 
 static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
                                                struct its_cmd_desc *desc)
 {
+       struct its_collection *col;
+
+       col = dev_event_to_col(desc->its_inv_cmd.dev,
+                              desc->its_inv_cmd.event_id);
+
        its_encode_cmd(cmd, GITS_CMD_INV);
        its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
        its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
 
        its_fixup_cmd(cmd);
 
-       return desc->its_inv_cmd.dev->collection;
+       return col;
 }
 
 static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
@@ -497,7 +529,7 @@ static void its_send_movi(struct its_device *dev,
 
        desc.its_movi_cmd.dev = dev;
        desc.its_movi_cmd.col = col;
-       desc.its_movi_cmd.id = id;
+       desc.its_movi_cmd.event_id = id;
 
        its_send_single_command(dev->its, its_build_movi_cmd, &desc);
 }
@@ -528,7 +560,7 @@ static void its_send_invall(struct its_node *its, struct its_collection *col)
 static inline u32 its_get_event_id(struct irq_data *d)
 {
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
-       return d->hwirq - its_dev->lpi_base;
+       return d->hwirq - its_dev->event_map.lpi_base;
 }
 
 static void lpi_set_config(struct irq_data *d, bool enable)
@@ -583,7 +615,7 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 
        target_col = &its_dev->its->collections[cpu];
        its_send_movi(its_dev, target_col, id);
-       its_dev->collection = target_col;
+       its_dev->event_map.col_map[id] = cpu;
 
        return IRQ_SET_MASK_OK_DONE;
 }
@@ -713,8 +745,10 @@ out:
        return bitmap;
 }
 
-static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids)
+static void its_lpi_free(struct event_lpi_map *map)
 {
+       int base = map->lpi_base;
+       int nr_ids = map->nr_lpis;
        int lpi;
 
        spin_lock(&lpi_lock);
@@ -731,7 +765,8 @@ static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids)
 
        spin_unlock(&lpi_lock);
 
-       kfree(bitmap);
+       kfree(map->lpi_map);
+       kfree(map->col_map);
 }
 
 /*
@@ -1099,11 +1134,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
        struct its_device *dev;
        unsigned long *lpi_map;
        unsigned long flags;
+       u16 *col_map = NULL;
        void *itt;
        int lpi_base;
        int nr_lpis;
        int nr_ites;
-       int cpu;
        int sz;
 
        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1117,20 +1152,24 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
        sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
        itt = kzalloc(sz, GFP_KERNEL);
        lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
+       if (lpi_map)
+               col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL);
 
-       if (!dev || !itt || !lpi_map) {
+       if (!dev || !itt || !lpi_map || !col_map) {
                kfree(dev);
                kfree(itt);
                kfree(lpi_map);
+               kfree(col_map);
                return NULL;
        }
 
        dev->its = its;
        dev->itt = itt;
        dev->nr_ites = nr_ites;
-       dev->lpi_map = lpi_map;
-       dev->lpi_base = lpi_base;
-       dev->nr_lpis = nr_lpis;
+       dev->event_map.lpi_map = lpi_map;
+       dev->event_map.col_map = col_map;
+       dev->event_map.lpi_base = lpi_base;
+       dev->event_map.nr_lpis = nr_lpis;
        dev->device_id = dev_id;
        INIT_LIST_HEAD(&dev->entry);
 
@@ -1138,10 +1177,6 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
        list_add(&dev->entry, &its->its_device_list);
        raw_spin_unlock_irqrestore(&its->lock, flags);
 
-       /* Bind the device to the first possible CPU */
-       cpu = cpumask_first(cpu_online_mask);
-       dev->collection = &its->collections[cpu];
-
        /* Map device to its ITT */
        its_send_mapd(dev, 1);
 
@@ -1163,12 +1198,13 @@ static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
 {
        int idx;
 
-       idx = find_first_zero_bit(dev->lpi_map, dev->nr_lpis);
-       if (idx == dev->nr_lpis)
+       idx = find_first_zero_bit(dev->event_map.lpi_map,
+                                 dev->event_map.nr_lpis);
+       if (idx == dev->event_map.nr_lpis)
                return -ENOSPC;
 
-       *hwirq = dev->lpi_base + idx;
-       set_bit(idx, dev->lpi_map);
+       *hwirq = dev->event_map.lpi_base + idx;
+       set_bit(idx, dev->event_map.lpi_map);
 
        return 0;
 }
@@ -1288,7 +1324,8 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                irq_domain_set_hwirq_and_chip(domain, virq + i,
                                              hwirq, &its_irq_chip, its_dev);
                dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n",
-                       (int)(hwirq - its_dev->lpi_base), (int)hwirq, virq + i);
+                       (int)(hwirq - its_dev->event_map.lpi_base),
+                       (int)hwirq, virq + i);
        }
 
        return 0;
@@ -1300,6 +1337,9 @@ static void its_irq_domain_activate(struct irq_domain *domain,
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        u32 event = its_get_event_id(d);
 
+       /* Bind the LPI to the first possible CPU */
+       its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
+
        /* Map the GIC IRQ and event to the device */
        its_send_mapvi(its_dev, d->hwirq, event);
 }
@@ -1327,17 +1367,16 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
                u32 event = its_get_event_id(data);
 
                /* Mark interrupt index as unused */
-               clear_bit(event, its_dev->lpi_map);
+               clear_bit(event, its_dev->event_map.lpi_map);
 
                /* Nuke the entry in the domain */
                irq_domain_reset_irq_data(data);
        }
 
        /* If all interrupts have been freed, start mopping the floor */
-       if (bitmap_empty(its_dev->lpi_map, its_dev->nr_lpis)) {
-               its_lpi_free(its_dev->lpi_map,
-                            its_dev->lpi_base,
-                            its_dev->nr_lpis);
+       if (bitmap_empty(its_dev->event_map.lpi_map,
+                        its_dev->event_map.nr_lpis)) {
+               its_lpi_free(&its_dev->event_map);
 
                /* Unmap device/itt */
                its_send_mapd(its_dev, 0);
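
The ITS rework replaces the single per-device collection pointer with a per-event table: event_map.col_map[event] records the target CPU of each LPI, so retargeting one interrupt in its_set_affinity() no longer moves every interrupt of the device. A sketch of the lookup and retarget path (sizes and CPU count illustrative):

#include <stdio.h>

#define NR_CPUS 4

struct its_collection { int col_id; };

struct event_lpi_map {
	unsigned short col_map[8];	/* event -> CPU (collection index) */
	int lpi_base;
	int nr_lpis;
};

static struct its_collection collections[NR_CPUS] = {
	{ 0 }, { 1 }, { 2 }, { 3 }
};

static struct its_collection *event_to_col(struct event_lpi_map *m, int event)
{
	return &collections[m->col_map[event]];
}

/* Retarget one event without touching the device's other events. */
static void set_affinity(struct event_lpi_map *m, int event, int cpu)
{
	/* a MOVI command would be issued to the ITS here */
	m->col_map[event] = cpu;
}

int main(void)
{
	struct event_lpi_map m = { { 0 }, 8192, 8 };

	set_affinity(&m, 3, 2);
	printf("event 3 -> collection %d, event 0 -> collection %d\n",
	       event_to_col(&m, 3)->col_id, event_to_col(&m, 0)->col_id);
	return 0;
}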
index a45121546caff05acff16beb8d1dd2f4aa0d35c2..acb721b31bcfc4972921ff4c7f6afe1e1c8026cc 100644 (file)
@@ -2,7 +2,7 @@
  * SPEAr platform shared irq layer source file
  *
  * Copyright (C) 2009-2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * Copyright (C) 2012 ST Microelectronics
  * Shiraz Hashim <shiraz.linux.kernel@gmail.com>
index 8c91fd5eb6fdd4696f8b6d298eef8684525fc94e..375be509e95f5bd302da79446cb8867f8ecb8ec7 100644 (file)
@@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty)
        cs->hw.ser->tty = tty;
        atomic_set(&cs->hw.ser->refcnt, 1);
        init_completion(&cs->hw.ser->dead_cmp);
-
        tty->disc_data = cs;
 
+       /* Set the amount of data we're willing to receive per call
+        * from the hardware driver to half of the input buffer size
+        * to leave some reserve.
+        * Note: We don't do flow control towards the hardware driver.
+        * If more data is received than will fit into the input buffer,
+        * it will be dropped and an error will be logged. This should
+        * never happen, as the device is slow and the buffer size is ample.
+        */
+       tty->receive_room = RBUFSIZE/2;
+
        /* OK.. Initialization of the datastructures and the HW is done.. Now
         * startup system and notify the LL that we are ready to run
         */
@@ -597,28 +606,6 @@ static int gigaset_tty_hangup(struct tty_struct *tty)
        return 0;
 }
 
-/*
- * Read on the tty.
- * Unused, received data goes only to the Gigaset driver.
- */
-static ssize_t
-gigaset_tty_read(struct tty_struct *tty, struct file *file,
-                unsigned char __user *buf, size_t count)
-{
-       return -EAGAIN;
-}
-
-/*
- * Write on the tty.
- * Unused, transmit data comes only from the Gigaset driver.
- */
-static ssize_t
-gigaset_tty_write(struct tty_struct *tty, struct file *file,
-                 const unsigned char *buf, size_t count)
-{
-       return -EAGAIN;
-}
-
 /*
  * Ioctl on the tty.
  * Called in process context only.
@@ -752,8 +739,6 @@ static struct tty_ldisc_ops gigaset_ldisc = {
        .open           = gigaset_tty_open,
        .close          = gigaset_tty_close,
        .hangup         = gigaset_tty_hangup,
-       .read           = gigaset_tty_read,
-       .write          = gigaset_tty_write,
        .ioctl          = gigaset_tty_ioctl,
        .receive_buf    = gigaset_tty_receive,
        .write_wakeup   = gigaset_tty_wakeup,
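
tty->receive_room caps how much data the tty layer passes to receive_buf() per call; half the input buffer leaves reserve precisely because, as the comment says, no flow control is exerted toward the hardware driver. The sizing rule in isolation (the RBUFSIZE value here is illustrative, not the driver's):

#include <stdio.h>

#define RBUFSIZE 4096	/* driver input buffer size; illustrative value */

int main(void)
{
	unsigned int receive_room = RBUFSIZE / 2;  /* accept at most half per call */

	printf("receive_room = %u bytes\n", receive_room);
	return 0;
}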
index b59727309072462fb3d0dc42935000d9570a7589..bfec3bdfe59847c8f3553e178a8b33a1c5a708fd 100644 (file)
@@ -259,7 +259,7 @@ config DM_CRYPT
          the ciphers you're going to use in the cryptoapi configuration.
 
          For further information on dm-crypt and userspace tools see:
-         <http://code.google.com/p/cryptsetup/wiki/DMCrypt>
+         <https://gitlab.com/cryptsetup/cryptsetup/wikis/DMCrypt>
 
          To compile this code as a module, choose M here: the module will
          be called dm-crypt.
index a08e3eeac3c5fbf389ede37b839f40f705cd4d47..79a6d63e8ed3dce7b5bf936face73542af0a77d8 100644 (file)
@@ -320,7 +320,6 @@ static inline void closure_wake_up(struct closure_waitlist *list)
 do {                                                                   \
        set_closure_fn(_cl, _fn, _wq);                                  \
        closure_sub(_cl, CLOSURE_RUNNING + 1);                          \
-       return;                                                         \
 } while (0)
 
 /**
@@ -349,7 +348,6 @@ do {                                                                        \
 do {                                                                   \
        set_closure_fn(_cl, _fn, _wq);                                  \
        closure_queue(_cl);                                             \
-       return;                                                         \
 } while (0)
 
 /**
@@ -365,7 +363,6 @@ do {                                                                        \
 do {                                                                   \
        set_closure_fn(_cl, _destructor, NULL);                         \
        closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1);     \
-       return;                                                         \
 } while (0)
 
 /**
index cb64e64a478954e5c7dec8f774df29852fad0e01..bf6a9ca18403f3a8bb0d289f43235eb59d79c786 100644 (file)
@@ -105,6 +105,7 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
        } while (n != bio);
 
        continue_at(&s->cl, bch_bio_submit_split_done, NULL);
+       return;
 submit:
        generic_make_request(bio);
 }
index ce64fc8512518c6bff63b9a551d931341aaf6f84..418607a6ba33442c09b85dc9ea2070fee88de0ca 100644 (file)
@@ -592,12 +592,14 @@ static void journal_write_unlocked(struct closure *cl)
 
        if (!w->need_write) {
                closure_return_with_destructor(cl, journal_write_unlock);
+               return;
        } else if (journal_full(&c->journal)) {
                journal_reclaim(c);
                spin_unlock(&c->journal.lock);
 
                btree_flush_write(c);
                continue_at(cl, journal_write, system_wq);
+               return;
        }
 
        c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));
index 4afb2d26b148a3a41ca55171dee89d75da64e4f4..f292790997d72b98fc633dd3da089d93443fbdfa 100644 (file)
@@ -88,8 +88,10 @@ static void bch_data_insert_keys(struct closure *cl)
        if (journal_ref)
                atomic_dec_bug(journal_ref);
 
-       if (!op->insert_data_done)
+       if (!op->insert_data_done) {
                continue_at(cl, bch_data_insert_start, op->wq);
+               return;
+       }
 
        bch_keylist_free(&op->insert_keys);
        closure_return(cl);
@@ -216,8 +218,10 @@ static void bch_data_insert_start(struct closure *cl)
                /* 1 for the device pointer and 1 for the chksum */
                if (bch_keylist_realloc(&op->insert_keys,
                                        3 + (op->csum ? 1 : 0),
-                                       op->c))
+                                       op->c)) {
                        continue_at(cl, bch_data_insert_keys, op->wq);
+                       return;
+               }
 
                k = op->insert_keys.top;
                bkey_init(k);
@@ -255,6 +259,7 @@ static void bch_data_insert_start(struct closure *cl)
 
        op->insert_data_done = true;
        continue_at(cl, bch_data_insert_keys, op->wq);
+       return;
 err:
        /* bch_alloc_sectors() blocks if s->writeback = true */
        BUG_ON(op->writeback);
@@ -576,8 +581,10 @@ static void cache_lookup(struct closure *cl)
        ret = bch_btree_map_keys(&s->op, s->iop.c,
                                 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
                                 cache_lookup_fn, MAP_END_KEY);
-       if (ret == -EAGAIN)
+       if (ret == -EAGAIN) {
                continue_at(cl, cache_lookup, bcache_wq);
+               return;
+       }
 
        closure_return(cl);
 }
@@ -1085,6 +1092,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
                continue_at_nobarrier(&s->cl,
                                      flash_dev_nodata,
                                      bcache_wq);
+               return;
        } else if (rw) {
                bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
                                        &KEY(d->id, bio->bi_iter.bi_sector, 0),
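
The closure.h hunks delete the `return;` hidden inside continue_at(), continue_at_nobarrier() and closure_return_with_destructor(), and every call site above gains an explicit return (or a guarded block). The motivation is visible control flow: a macro that returns from the enclosing function invites silent fall-through bugs when code is later added after it. A toy illustration of the new convention:

#include <stdio.h>

static void schedule(void (*fn)(void))
{
	fn();			/* a real implementation would queue, not call */
}

/* After the change: the macro only reschedules; it does NOT return
 * from the caller any more. */
#define continue_at(fn)	schedule(fn)

static void step_two(void)
{
	printf("step two\n");
}

static void step_one(void)
{
	printf("step one\n");
	continue_at(step_two);
	return;			/* explicit, so fall-through stays visible */
}

int main(void)
{
	step_one();
	return 0;
}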
index ed2346ddf4c9fb54dafeb92ae9c795a0584444e8..e51de52eeb94f71c9d6712a61d31e49f8e6f2f60 100644 (file)
@@ -494,7 +494,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
        bitmap_super_t *sb;
        unsigned long chunksize, daemon_sleep, write_behind;
 
-       bitmap->storage.sb_page = alloc_page(GFP_KERNEL);
+       bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (bitmap->storage.sb_page == NULL)
                return -ENOMEM;
        bitmap->storage.sb_page->index = 0;
@@ -541,6 +541,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
        sb->state = cpu_to_le32(bitmap->flags);
        bitmap->events_cleared = bitmap->mddev->events;
        sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
+       bitmap->mddev->bitmap_info.nodes = 0;
 
        kunmap_atomic(sb);
 
@@ -558,6 +559,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
        unsigned long sectors_reserved = 0;
        int err = -EINVAL;
        struct page *sb_page;
+       loff_t offset = bitmap->mddev->bitmap_info.offset;
 
        if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
                chunksize = 128 * 1024 * 1024;
@@ -584,9 +586,9 @@ re_read:
                bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
                /* to 4k blocks */
                bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
-               bitmap->mddev->bitmap_info.offset += bitmap->cluster_slot * (bm_blocks << 3);
+               offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
                pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
-                       bitmap->cluster_slot, (unsigned long long)bitmap->mddev->bitmap_info.offset);
+                       bitmap->cluster_slot, (unsigned long long)offset);
        }
 
        if (bitmap->storage.file) {
@@ -597,7 +599,7 @@ re_read:
                                bitmap, bytes, sb_page);
        } else {
                err = read_sb_page(bitmap->mddev,
-                                  bitmap->mddev->bitmap_info.offset,
+                                  offset,
                                   sb_page,
                                   0, sizeof(bitmap_super_t));
        }
@@ -611,8 +613,16 @@ re_read:
        daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
        write_behind = le32_to_cpu(sb->write_behind);
        sectors_reserved = le32_to_cpu(sb->sectors_reserved);
-       nodes = le32_to_cpu(sb->nodes);
-       strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
+       /* XXX: This is a hack to ensure that we don't use clustering
+        * in case:
+        *      - dm-raid is in use and
+        *      - the node count written in bitmap_sb is erroneous.
+        */
+       if (!bitmap->mddev->sync_super) {
+               nodes = le32_to_cpu(sb->nodes);
+               strlcpy(bitmap->mddev->bitmap_info.cluster_name,
+                               sb->cluster_name, 64);
+       }
 
        /* verify that the bitmap-specific fields are valid */
        if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
@@ -671,7 +681,7 @@ out:
        kunmap_atomic(sb);
        /* Assigning chunksize is required for "re_read" */
        bitmap->mddev->bitmap_info.chunksize = chunksize;
-       if (nodes && (bitmap->cluster_slot < 0)) {
+       if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
                err = md_setup_cluster(bitmap->mddev, nodes);
                if (err) {
                        pr_err("%s: Could not setup cluster service (%d)\n",
@@ -1866,10 +1876,6 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
        if (IS_ERR(bitmap))
                return PTR_ERR(bitmap);
 
-       rv = bitmap_read_sb(bitmap);
-       if (rv)
-               goto err;
-
        rv = bitmap_init_from_disk(bitmap, 0);
        if (rv)
                goto err;
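
The bitmap_read_sb() fix computes the cluster-slot superblock offset into a local variable instead of adding it to mddev->bitmap_info.offset in place; since the function can loop via re_read, the in-place version kept accumulating the slot offset on every pass. The bug and the fix in miniature:

#include <stdio.h>

struct bitmap_info { long long offset; };

/* Buggy: mutates shared state, so a second call drifts further. */
static long long slot_offset_buggy(struct bitmap_info *info, int slot,
				   long long bm_blocks)
{
	info->offset += slot * (bm_blocks << 3);
	return info->offset;
}

/* Fixed: the slot offset lives in a local; re-reading is idempotent. */
static long long slot_offset_fixed(const struct bitmap_info *info, int slot,
				   long long bm_blocks)
{
	return info->offset + slot * (bm_blocks << 3);
}

int main(void)
{
	struct bitmap_info a = { 64 }, b = { 64 };

	printf("buggy: %lld then %lld\n",
	       slot_offset_buggy(&a, 1, 4), slot_offset_buggy(&a, 1, 4));
	printf("fixed: %lld then %lld\n",
	       slot_offset_fixed(&b, 1, 4), slot_offset_fixed(&b, 1, 4));
	return 0;	/* buggy: 96 then 128; fixed: 96 then 96 */
}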
index b6f22651dd356e7e5b4d0bed4268f4ac9dd62772..48a4a826ae07649419d033b99c564b2adb9da6ea 100644 (file)
@@ -1686,7 +1686,7 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
 
        if (from_cblock(cache_size)) {
                mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
-               if (!mq->cache_hit_bits && mq->cache_hit_bits) {
+               if (!mq->cache_hit_bits) {
                        DMERR("couldn't allocate cache hit bitset");
                        goto bad_cache_hit_bits;
                }
index 1b4e1756b169dab9d7377dcea444087176830652..1fe93cfea7d309a659d79fe2b953b5f2dbe7b466 100644 (file)
@@ -1947,6 +1947,7 @@ static int commit_if_needed(struct cache *cache)
 
 static void process_deferred_bios(struct cache *cache)
 {
+       bool prealloc_used = false;
        unsigned long flags;
        struct bio_list bios;
        struct bio *bio;
@@ -1966,6 +1967,7 @@ static void process_deferred_bios(struct cache *cache)
                 * this bio might require one, we pause until there are some
                 * prepared mappings to process.
                 */
+               prealloc_used = true;
                if (prealloc_data_structs(cache, &structs)) {
                        spin_lock_irqsave(&cache->lock, flags);
                        bio_list_merge(&cache->deferred_bios, &bios);
@@ -1983,11 +1985,13 @@ static void process_deferred_bios(struct cache *cache)
                        process_bio(cache, &structs, bio);
        }
 
-       prealloc_free_structs(cache, &structs);
+       if (prealloc_used)
+               prealloc_free_structs(cache, &structs);
 }
 
 static void process_deferred_cells(struct cache *cache)
 {
+       bool prealloc_used = false;
        unsigned long flags;
        struct dm_bio_prison_cell *cell, *tmp;
        struct list_head cells;
@@ -2007,6 +2011,7 @@ static void process_deferred_cells(struct cache *cache)
                 * this bio might require one, we pause until there are some
                 * prepared mappings to process.
                 */
+               prealloc_used = true;
                if (prealloc_data_structs(cache, &structs)) {
                        spin_lock_irqsave(&cache->lock, flags);
                        list_splice(&cells, &cache->deferred_cells);
@@ -2017,7 +2022,8 @@ static void process_deferred_cells(struct cache *cache)
                process_cell(cache, &structs, cell);
        }
 
-       prealloc_free_structs(cache, &structs);
+       if (prealloc_used)
+               prealloc_free_structs(cache, &structs);
 }
 
 static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
@@ -2062,7 +2068,7 @@ static void process_deferred_writethrough_bios(struct cache *cache)
 
 static void writeback_some_dirty_blocks(struct cache *cache)
 {
-       int r = 0;
+       bool prealloc_used = false;
        dm_oblock_t oblock;
        dm_cblock_t cblock;
        struct prealloc structs;
@@ -2072,15 +2078,12 @@ static void writeback_some_dirty_blocks(struct cache *cache)
        memset(&structs, 0, sizeof(structs));
 
        while (spare_migration_bandwidth(cache)) {
-               if (prealloc_data_structs(cache, &structs))
-                       break;
-
-               r = policy_writeback_work(cache->policy, &oblock, &cblock, busy);
-               if (r)
-                       break;
+               if (policy_writeback_work(cache->policy, &oblock, &cblock, busy))
+                       break; /* no work to do */
 
-               r = get_cell(cache, oblock, &structs, &old_ocell);
-               if (r) {
+               prealloc_used = true;
+               if (prealloc_data_structs(cache, &structs) ||
+                   get_cell(cache, oblock, &structs, &old_ocell)) {
                        policy_set_dirty(cache->policy, oblock);
                        break;
                }
@@ -2088,7 +2091,8 @@ static void writeback_some_dirty_blocks(struct cache *cache)
                writeback(cache, &structs, oblock, cblock, old_ocell);
        }
 
-       prealloc_free_structs(cache, &structs);
+       if (prealloc_used)
+               prealloc_free_structs(cache, &structs);
 }
 
 /*----------------------------------------------------------------
@@ -3496,7 +3500,7 @@ static void cache_resume(struct dm_target *ti)
  * <#demotions> <#promotions> <#dirty>
  * <#features> <features>*
  * <#core args> <core args>
- * <policy name> <#policy args> <policy args>* <cache metadata mode>
+ * <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
  */
 static void cache_status(struct dm_target *ti, status_type_t type,
                         unsigned status_flags, char *result, unsigned maxlen)
@@ -3582,6 +3586,11 @@ static void cache_status(struct dm_target *ti, status_type_t type,
                else
                        DMEMIT("rw ");
 
+               if (dm_cache_metadata_needs_check(cache->cmd))
+                       DMEMIT("needs_check ");
+               else
+                       DMEMIT("- ");
+
                break;
 
        case STATUSTYPE_TABLE:
@@ -3820,7 +3829,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type cache_target = {
        .name = "cache",
-       .version = {1, 7, 0},
+       .version = {1, 8, 0},
        .module = THIS_MODULE,
        .ctr = cache_ctr,
        .dtr = cache_dtr,
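
All three dm-cache loops now record in prealloc_used whether any iteration actually reached prealloc_data_structs(), and call prealloc_free_structs() only in that case, keeping the zero-work path away from the mempools entirely. The pattern in isolation:

#include <stdbool.h>
#include <stdio.h>

struct prealloc { int populated; };

static int prealloc_data_structs(struct prealloc *p)
{
	p->populated = 1;	/* pretend allocation succeeded */
	return 0;
}

static void prealloc_free_structs(struct prealloc *p)
{
	printf("freeing prealloc (populated=%d)\n", p->populated);
}

static void process(int nr_items)
{
	bool prealloc_used = false;
	struct prealloc structs = { 0 };

	for (int i = 0; i < nr_items; i++) {
		prealloc_used = true;		/* set before the attempt */
		if (prealloc_data_structs(&structs))
			break;
		/* ... process one item ... */
	}

	if (prealloc_used)			/* zero-work path skips the free */
		prealloc_free_structs(&structs);
}

int main(void)
{
	process(0);	/* prints nothing */
	process(3);	/* frees once */
	return 0;
}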
index c33f61a4cc28d8b772b9444b710e7f55fa6df2fb..d2bbe8cc1e9786b66af798df9d8666d3fb96223c 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/sort.h>
 #include <linux/rbtree.h>
 
@@ -268,7 +269,7 @@ struct pool {
        process_mapping_fn process_prepared_mapping;
        process_mapping_fn process_prepared_discard;
 
-       struct dm_bio_prison_cell *cell_sort_array[CELL_SORT_ARRAY_SIZE];
+       struct dm_bio_prison_cell **cell_sort_array;
 };
 
 static enum pool_mode get_pool_mode(struct pool *pool);
@@ -665,16 +666,21 @@ static void requeue_io(struct thin_c *tc)
        requeue_deferred_cells(tc);
 }
 
-static void error_retry_list(struct pool *pool)
+static void error_retry_list_with_code(struct pool *pool, int error)
 {
        struct thin_c *tc;
 
        rcu_read_lock();
        list_for_each_entry_rcu(tc, &pool->active_thins, list)
-               error_thin_bio_list(tc, &tc->retry_on_resume_list, -EIO);
+               error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
        rcu_read_unlock();
 }
 
+static void error_retry_list(struct pool *pool)
+{
+       error_retry_list_with_code(pool, -EIO);
+}
+
 /*
  * This section of code contains the logic for processing a thin device's IO.
  * Much of the code depends on pool object resources (lists, workqueues, etc)
@@ -2281,18 +2287,23 @@ static void do_waker(struct work_struct *ws)
        queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
 }
 
+static void notify_of_pool_mode_change_to_oods(struct pool *pool);
+
 /*
  * We're holding onto IO to allow userland time to react.  After the
  * timeout either the pool will have been resized (and thus back in
- * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
+ * PM_WRITE mode), or we degrade to PM_OUT_OF_DATA_SPACE w/ error_if_no_space.
  */
 static void do_no_space_timeout(struct work_struct *ws)
 {
        struct pool *pool = container_of(to_delayed_work(ws), struct pool,
                                         no_space_timeout);
 
-       if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
-               set_pool_mode(pool, PM_READ_ONLY);
+       if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
+               pool->pf.error_if_no_space = true;
+               notify_of_pool_mode_change_to_oods(pool);
+               error_retry_list_with_code(pool, -ENOSPC);
+       }
 }
 
 /*----------------------------------------------------------------*/
@@ -2370,6 +2381,14 @@ static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
               dm_device_name(pool->pool_md), new_mode);
 }
 
+static void notify_of_pool_mode_change_to_oods(struct pool *pool)
+{
+       if (!pool->pf.error_if_no_space)
+               notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)");
+       else
+               notify_of_pool_mode_change(pool, "out-of-data-space (error IO)");
+}
+
 static bool passdown_enabled(struct pool_c *pt)
 {
        return pt->adjusted_pf.discard_passdown;
@@ -2454,7 +2473,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
                 * frequently seeing this mode.
                 */
                if (old_mode != new_mode)
-                       notify_of_pool_mode_change(pool, "out-of-data-space");
+                       notify_of_pool_mode_change_to_oods(pool);
                pool->process_bio = process_bio_read_only;
                pool->process_discard = process_discard_bio;
                pool->process_cell = process_cell_read_only;
@@ -2777,6 +2796,7 @@ static void __pool_destroy(struct pool *pool)
 {
        __pool_table_remove(pool);
 
+       vfree(pool->cell_sort_array);
        if (dm_pool_metadata_close(pool->pmd) < 0)
                DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
 
@@ -2889,6 +2909,13 @@ static struct pool *pool_create(struct mapped_device *pool_md,
                goto bad_mapping_pool;
        }
 
+       pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE);
+       if (!pool->cell_sort_array) {
+               *error = "Error allocating cell sort array";
+               err_p = ERR_PTR(-ENOMEM);
+               goto bad_sort_array;
+       }
+
        pool->ref_count = 1;
        pool->last_commit_jiffies = jiffies;
        pool->pool_md = pool_md;
@@ -2897,6 +2924,8 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 
        return pool;
 
+bad_sort_array:
+       mempool_destroy(pool->mapping_pool);
 bad_mapping_pool:
        dm_deferred_set_destroy(pool->all_io_ds);
 bad_all_io_ds:
@@ -3714,6 +3743,7 @@ static void emit_flags(struct pool_features *pf, char *result,
  * Status line is:
  *    <transaction id> <used metadata sectors>/<total metadata sectors>
  *    <used data sectors>/<total data sectors> <held metadata root>
+ *    <pool mode> <discard config> <no space config> <needs_check>
  */
 static void pool_status(struct dm_target *ti, status_type_t type,
                        unsigned status_flags, char *result, unsigned maxlen)
@@ -3815,6 +3845,11 @@ static void pool_status(struct dm_target *ti, status_type_t type,
                else
                        DMEMIT("queue_if_no_space ");
 
+               if (dm_pool_metadata_needs_check(pool->pmd))
+                       DMEMIT("needs_check ");
+               else
+                       DMEMIT("- ");
+
                break;
 
        case STATUSTYPE_TABLE:
@@ -3918,7 +3953,7 @@ static struct target_type pool_target = {
        .name = "thin-pool",
        .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
                    DM_TARGET_IMMUTABLE,
-       .version = {1, 15, 0},
+       .version = {1, 16, 0},
        .module = THIS_MODULE,
        .ctr = pool_ctr,
        .dtr = pool_dtr,
@@ -4305,7 +4340,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type thin_target = {
        .name = "thin",
-       .version = {1, 15, 0},
+       .version = {1, 16, 0},
        .module = THIS_MODULE,
        .ctr = thin_ctr,
        .dtr = thin_dtr,
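
Embedding CELL_SORT_ARRAY_SIZE pointers directly in struct pool made the structure tens of kilobytes large, pushing its allocation to a high order; the change allocates the array separately with vmalloc() (virtually contiguous, built page by page) and vfree()s it in __pool_destroy(). A user-space analogue with malloc() standing in for vmalloc():

#include <stdio.h>
#include <stdlib.h>

#define CELL_SORT_ARRAY_SIZE 8192

struct cell;

struct pool {
	/* ... other fields ... */
	struct cell **cell_sort_array;	/* was: an embedded 8192-entry array */
};

static struct pool *pool_create(void)
{
	struct pool *pool = calloc(1, sizeof(*pool));	/* now small */

	if (!pool)
		return NULL;
	/* the kernel uses vmalloc() here to avoid a high-order allocation */
	pool->cell_sort_array =
		malloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE);
	if (!pool->cell_sort_array) {
		free(pool);
		return NULL;
	}
	return pool;
}

static void pool_destroy(struct pool *pool)
{
	free(pool->cell_sort_array);	/* vfree() in the kernel */
	free(pool);
}

int main(void)
{
	struct pool *pool = pool_create();

	printf("struct pool is now %zu bytes\n", sizeof(*pool));
	pool_destroy(pool);
	return 0;
}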
index f331d888e7f5a52cd5a2ada7498a5051bd8e38ca..ab37ae114e943c20c161f88b8c2a739206bfafab 100644 (file)
@@ -1067,13 +1067,10 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
  */
 static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 {
-       int nr_requests_pending;
-
        atomic_dec(&md->pending[rw]);
 
        /* nudge anyone waiting on suspend queue */
-       nr_requests_pending = md_in_flight(md);
-       if (!nr_requests_pending)
+       if (!md_in_flight(md))
                wake_up(&md->wait);
 
        /*
@@ -1085,8 +1082,7 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
        if (run_queue) {
                if (md->queue->mq_ops)
                        blk_mq_run_hw_queues(md->queue, true);
-               else if (!nr_requests_pending ||
-                        (nr_requests_pending >= md->queue->nr_congestion_on))
+               else
                        blk_run_queue_async(md->queue);
        }
 
@@ -2281,8 +2277,6 @@ static void dm_init_old_md_queue(struct mapped_device *md)
 
 static void cleanup_mapped_device(struct mapped_device *md)
 {
-       cleanup_srcu_struct(&md->io_barrier);
-
        if (md->wq)
                destroy_workqueue(md->wq);
        if (md->kworker_task)
@@ -2294,6 +2288,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
        if (md->bs)
                bioset_free(md->bs);
 
+       cleanup_srcu_struct(&md->io_barrier);
+
        if (md->disk) {
                spin_lock(&_minor_lock);
                md->disk->private_data = NULL;
index fcfc4b9b26728029e59023ae3762d271b8f7a676..0072190515e0f6edca1e09718dae5102b3e7d274 100644 (file)
@@ -44,6 +44,7 @@ struct resync_info {
 
 /* md_cluster_info flags */
 #define                MD_CLUSTER_WAITING_FOR_NEWDISK          1
+#define                MD_CLUSTER_SUSPEND_READ_BALANCING       2
 
 
 struct md_cluster_info {
@@ -275,6 +276,9 @@ clear_bit:
 
 static void recover_prep(void *arg)
 {
+       struct mddev *mddev = arg;
+       struct md_cluster_info *cinfo = mddev->cluster_info;
+       set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
 }
 
 static void recover_slot(void *arg, struct dlm_slot *slot)
@@ -307,6 +311,7 @@ static void recover_done(void *arg, struct dlm_slot *slots,
 
        cinfo->slot_number = our_slot;
        complete(&cinfo->completion);
+       clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
 }
 
 static const struct dlm_lockspace_ops md_ls_ops = {
@@ -816,12 +821,17 @@ static void resync_finish(struct mddev *mddev)
        resync_send(mddev, RESYNCING, 0, 0);
 }
 
-static int area_resyncing(struct mddev *mddev, sector_t lo, sector_t hi)
+static int area_resyncing(struct mddev *mddev, int direction,
+               sector_t lo, sector_t hi)
 {
        struct md_cluster_info *cinfo = mddev->cluster_info;
        int ret = 0;
        struct suspend_info *s;
 
+       if ((direction == READ) &&
+               test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state))
+               return 1;
+
        spin_lock_irq(&cinfo->suspend_lock);
        if (list_empty(&cinfo->suspend_list))
                goto out;
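Together with the MD_CLUSTER_SUSPEND_READ_BALANCING bit set in recover_prep() above and cleared in recover_done(), this makes every READ look "resyncing" for the whole DLM recovery window, regardless of the suspend list. The control flow reduces to something like this simplified sketch (not the kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    #define READ  0
    #define WRITE 1

    /* Set in recover_prep(), cleared in recover_done(). */
    static bool suspend_read_balancing;

    /* Simplified analogue of area_resyncing(): reads bail out early while
     * DLM slot recovery is in flight; writes still consult the suspend list. */
    static int area_resyncing_demo(int direction, bool range_suspended)
    {
            if (direction == READ && suspend_read_balancing)
                    return 1;
            return range_suspended ? 1 : 0;
    }

    int main(void)
    {
            suspend_read_balancing = true;                             /* recover_prep() */
            printf("read: %d\n", area_resyncing_demo(READ, false));    /* 1 */
            suspend_read_balancing = false;                            /* recover_done() */
            printf("read: %d\n", area_resyncing_demo(READ, false));    /* 0 */
            return 0;
    }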
index 6817ee00e053d7d7e74b734185414c0cb3b18a38..00defe2badbc7952ec7d4cf358ad0408a2460b7f 100644 (file)
@@ -18,7 +18,7 @@ struct md_cluster_operations {
        int (*metadata_update_start)(struct mddev *mddev);
        int (*metadata_update_finish)(struct mddev *mddev);
        int (*metadata_update_cancel)(struct mddev *mddev);
-       int (*area_resyncing)(struct mddev *mddev, sector_t lo, sector_t hi);
+       int (*area_resyncing)(struct mddev *mddev, int direction, sector_t lo, sector_t hi);
        int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev);
        int (*add_new_disk_finish)(struct mddev *mddev);
        int (*new_disk_ack)(struct mddev *mddev, bool ack);
index d429c30cd51471c26cb1c07cb3e6a413106133d4..0c2a4e8b873c659dbc260b2aa5484c7a5e87b176 100644 (file)
@@ -5382,6 +5382,8 @@ static void __md_stop(struct mddev *mddev)
 {
        struct md_personality *pers = mddev->pers;
        mddev_detach(mddev);
+       /* Ensure ->event_work is done */
+       flush_workqueue(md_misc_wq);
        spin_lock(&mddev->lock);
        mddev->ready = 0;
        mddev->pers = NULL;
@@ -7437,7 +7439,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
        err = request_module("md-cluster");
        if (err) {
                pr_err("md-cluster module not found.\n");
-               return err;
+               return -ENOENT;
        }
 
        spin_lock(&pers_lock);
index e04cfd2d60ef9b2d80ee89baa0fb2136bf3cd2ab..9836c0ae897c33c4e227bca77cc95026c193f73c 100644 (file)
@@ -309,8 +309,8 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
 
                if (s < 0 && nr_center < -s) {
                        /* not enough in central node */
-                       shift(left, center, nr_center);
-                       s = nr_center - target;
+                       shift(left, center, -nr_center);
+                       s += nr_center;
                        shift(left, right, s);
                        nr_right += s;
                } else
@@ -323,7 +323,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
                if (s > 0 && nr_center < s) {
                        /* not enough in central node */
                        shift(center, right, nr_center);
-                       s = target - nr_center;
+                       s -= nr_center;
                        shift(left, right, s);
                        nr_left -= s;
                } else
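The fix is easiest to see with numbers. Assuming shift(a, b, n) moves n entries from a to b when n > 0 and -n entries from b to a when n < 0 (consistent with how the fixed branch uses it), take a hypothetical nr_left = 2, nr_center = 1, nr_right = 9 with target = 4, so s = nr_left - target = -2 and nr_center < -s:

    shift(left, center, -nr_center);  /* -1: one entry center -> left, left = 3 */
    s += nr_center;                   /* s = -1                                 */
    shift(left, right, s);            /* -1: one entry right -> left, left = 4  */
    nr_right += s;                    /* nr_right = 8                           */

The left node ends at its target with the center drained, which is what the rest of the rebalance expects. The old code shifted the wrong way (positive nr_center moved entries out of the left node) and recomputed s as nr_center - target, pulling the wrong number of entries from the right node; the incremental s += nr_center keeps the bookkeeping consistent.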
index 200ac12a1d407b5c648c7271a995a022ac8e7efc..fdd3793e22f957ef08db71f897607c68ce6eb6a3 100644 (file)
@@ -255,7 +255,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
        int r;
        struct del_stack *s;
 
-       s = kmalloc(sizeof(*s), GFP_KERNEL);
+       s = kmalloc(sizeof(*s), GFP_NOIO);
        if (!s)
                return -ENOMEM;
        s->info = info;
index f80f1af61ce70bce15a72d242d87c1c34da49a79..94f5b55069e09610f21ea640f5dff3efd7e580ca 100644 (file)
@@ -336,7 +336,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
                spin_lock_irqsave(&conf->device_lock, flags);
                if (r1_bio->mddev->degraded == conf->raid_disks ||
                    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
-                    !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
+                    test_bit(In_sync, &conf->mirrors[mirror].rdev->flags)))
                        uptodate = 1;
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
@@ -541,7 +541,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 
        if ((conf->mddev->recovery_cp < this_sector + sectors) ||
            (mddev_is_clustered(conf->mddev) &&
-           md_cluster_ops->area_resyncing(conf->mddev, this_sector,
+           md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
                    this_sector + sectors)))
                choose_first = 1;
        else
@@ -1111,7 +1111,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
            ((bio_end_sector(bio) > mddev->suspend_lo &&
            bio->bi_iter.bi_sector < mddev->suspend_hi) ||
            (mddev_is_clustered(mddev) &&
-            md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
+            md_cluster_ops->area_resyncing(mddev, WRITE,
+                    bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
                /* As the suspend_* range is controlled by
                 * userspace, we want an interruptible
                 * wait.
@@ -1124,7 +1125,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
                        if (bio_end_sector(bio) <= mddev->suspend_lo ||
                            bio->bi_iter.bi_sector >= mddev->suspend_hi ||
                            (mddev_is_clustered(mddev) &&
-                            !md_cluster_ops->area_resyncing(mddev,
+                            !md_cluster_ops->area_resyncing(mddev, WRITE,
                                     bio->bi_iter.bi_sector, bio_end_sector(bio))))
                                break;
                        schedule();
index 940f2f3654617918d8eef951262c3ca120ab83ce..38c58e19cfce3d7bdea554b26474080a88e02cca 100644 (file)
@@ -3556,6 +3556,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
                        /* far_copies must be 1 */
                        conf->prev.stride = conf->dev_sectors;
        }
+       conf->reshape_safe = conf->reshape_progress;
        spin_lock_init(&conf->device_lock);
        INIT_LIST_HEAD(&conf->retry_list);
 
@@ -3760,7 +3761,6 @@ static int run(struct mddev *mddev)
                }
                conf->offset_diff = min_offset_diff;
 
-               conf->reshape_safe = conf->reshape_progress;
                clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
                clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
                set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
@@ -4103,6 +4103,7 @@ static int raid10_start_reshape(struct mddev *mddev)
                conf->reshape_progress = size;
        } else
                conf->reshape_progress = 0;
+       conf->reshape_safe = conf->reshape_progress;
        spin_unlock_irq(&conf->device_lock);
 
        if (mddev->delta_disks && mddev->bitmap) {
@@ -4170,6 +4171,7 @@ abort:
                rdev->new_data_offset = rdev->data_offset;
        smp_wmb();
        conf->reshape_progress = MaxSector;
+       conf->reshape_safe = MaxSector;
        mddev->reshape_position = MaxSector;
        spin_unlock_irq(&conf->device_lock);
        return ret;
@@ -4524,6 +4526,7 @@ static void end_reshape(struct r10conf *conf)
        md_finish_reshape(conf->mddev);
        smp_wmb();
        conf->reshape_progress = MaxSector;
+       conf->reshape_safe = MaxSector;
        spin_unlock_irq(&conf->device_lock);
 
        /* read-ahead size must cover two whole stripes, which is
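The raid10 hunks above all enforce a single invariant: reshape_safe must track reshape_progress at every point the latter is (re)initialised, so a stale checkpoint can never be compared against a fresh progress value. A tiny sketch making the pairing explicit (hypothetical helper, not in the driver):

    #include <stdio.h>

    typedef unsigned long long sector_t;

    struct r10conf_demo {
            sector_t reshape_progress;
            sector_t reshape_safe;
    };

    /* Hypothetical setter: the checkpoint moves in lock-step with progress. */
    static void set_reshape_progress(struct r10conf_demo *conf, sector_t pos)
    {
            conf->reshape_progress = pos;
            conf->reshape_safe = pos;
    }

    int main(void)
    {
            struct r10conf_demo conf;

            set_reshape_progress(&conf, 0);
            printf("progress=%llu safe=%llu\n",
                   conf.reshape_progress, conf.reshape_safe);
            return 0;
    }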
index 59e44e99eef3bacd4703fd6883513688f7c58b09..643d217bfa13ac8caa3dee9f9dd65d57f165bbe8 100644 (file)
@@ -2162,6 +2162,9 @@ static int resize_stripes(struct r5conf *conf, int newsize)
        if (!sc)
                return -ENOMEM;
 
+       /* Need to ensure auto-resizing doesn't interfere */
+       mutex_lock(&conf->cache_size_mutex);
+
        for (i = conf->max_nr_stripes; i; i--) {
                nsh = alloc_stripe(sc, GFP_KERNEL);
                if (!nsh)
@@ -2178,6 +2181,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
                        kmem_cache_free(sc, nsh);
                }
                kmem_cache_destroy(sc);
+               mutex_unlock(&conf->cache_size_mutex);
                return -ENOMEM;
        }
        /* Step 2 - Must use GFP_NOIO now.
@@ -2224,6 +2228,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
        } else
                err = -ENOMEM;
 
+       mutex_unlock(&conf->cache_size_mutex);
        /* Step 4, return new stripes to service */
        while(!list_empty(&newstripes)) {
                nsh = list_entry(newstripes.next, struct stripe_head, lru);
@@ -4061,8 +4066,10 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
                                 &first_bad, &bad_sectors))
                        set_bit(R5_ReadRepl, &dev->flags);
                else {
-                       if (rdev)
+                       if (rdev && !test_bit(Faulty, &rdev->flags))
                                set_bit(R5_NeedReplace, &dev->flags);
+                       else
+                               clear_bit(R5_NeedReplace, &dev->flags);
                        rdev = rcu_dereference(conf->disks[i].rdev);
                        clear_bit(R5_ReadRepl, &dev->flags);
                }
@@ -5857,12 +5864,14 @@ static void raid5d(struct md_thread *thread)
        pr_debug("%d stripes handled\n", handled);
 
        spin_unlock_irq(&conf->device_lock);
-       if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) {
+       if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) &&
+           mutex_trylock(&conf->cache_size_mutex)) {
                grow_one_stripe(conf, __GFP_NOWARN);
                /* Set flag even if allocation failed.  This helps
                 * slow down allocation requests when mem is short
                 */
                set_bit(R5_DID_ALLOC, &conf->cache_state);
+               mutex_unlock(&conf->cache_size_mutex);
        }
 
        async_tx_issue_pending_all();
@@ -5894,18 +5903,22 @@ raid5_set_cache_size(struct mddev *mddev, int size)
                return -EINVAL;
 
        conf->min_nr_stripes = size;
+       mutex_lock(&conf->cache_size_mutex);
        while (size < conf->max_nr_stripes &&
               drop_one_stripe(conf))
                ;
+       mutex_unlock(&conf->cache_size_mutex);
 
 
        err = md_allow_write(mddev);
        if (err)
                return err;
 
+       mutex_lock(&conf->cache_size_mutex);
        while (size > conf->max_nr_stripes)
                if (!grow_one_stripe(conf, GFP_KERNEL))
                        break;
+       mutex_unlock(&conf->cache_size_mutex);
 
        return 0;
 }
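Note that raid5_set_cache_size() deliberately drops cache_size_mutex around md_allow_write() rather than holding it across the call, presumably because md_allow_write() may sleep on metadata updates; the lock only needs to cover the loops that actually change max_nr_stripes. A runnable pthread sketch of the same shape (names hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t cache_size_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int max_nr_stripes = 256;

    static void set_cache_size_demo(int size)
    {
            pthread_mutex_lock(&cache_size_mutex);
            while (size < max_nr_stripes)
                    max_nr_stripes--;               /* drop_one_stripe() */
            pthread_mutex_unlock(&cache_size_mutex);

            /* ... md_allow_write() would run unlocked here ... */

            pthread_mutex_lock(&cache_size_mutex);
            while (size > max_nr_stripes)
                    max_nr_stripes++;               /* grow_one_stripe() */
            pthread_mutex_unlock(&cache_size_mutex);
    }

    int main(void)
    {
            set_cache_size_demo(512);
            printf("max_nr_stripes = %d\n", max_nr_stripes);
            return 0;
    }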
@@ -6371,11 +6384,18 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
                                      struct shrink_control *sc)
 {
        struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
-       int ret = 0;
-       while (ret < sc->nr_to_scan) {
-               if (drop_one_stripe(conf) == 0)
-                       return SHRINK_STOP;
-               ret++;
+       unsigned long ret = SHRINK_STOP;
+
+       if (mutex_trylock(&conf->cache_size_mutex)) {
+               ret = 0;
+               while (ret < sc->nr_to_scan) {
+                       if (drop_one_stripe(conf) == 0) {
+                               ret = SHRINK_STOP;
+                               break;
+                       }
+                       ret++;
+               }
+               mutex_unlock(&conf->cache_size_mutex);
        }
        return ret;
 }
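Because raid5_cache_scan() runs from memory reclaim, it must never sleep on cache_size_mutex while a resize holds it; mutex_trylock() lets it report SHRINK_STOP and let reclaim move on. A userspace sketch of the trylock-or-bail shape (simplified, hypothetical names):

    #include <pthread.h>
    #include <stdio.h>

    #define SHRINK_STOP (~0UL)

    static pthread_mutex_t cache_size_mutex = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long nr_stripes = 8;

    static unsigned long cache_scan_demo(unsigned long nr_to_scan)
    {
            unsigned long ret = SHRINK_STOP;        /* contended: tell reclaim to stop */

            if (pthread_mutex_trylock(&cache_size_mutex) == 0) {
                    ret = 0;
                    while (ret < nr_to_scan) {
                            if (nr_stripes == 0) {  /* drop_one_stripe() failed */
                                    ret = SHRINK_STOP;
                                    break;
                            }
                            nr_stripes--;
                            ret++;
                    }
                    pthread_mutex_unlock(&cache_size_mutex);
            }
            return ret;
    }

    int main(void)
    {
            printf("freed %lu, left %lu\n", cache_scan_demo(4), nr_stripes);
            return 0;
    }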
@@ -6444,6 +6464,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
                goto abort;
        spin_lock_init(&conf->device_lock);
        seqcount_init(&conf->gen_lock);
+       mutex_init(&conf->cache_size_mutex);
        init_waitqueue_head(&conf->wait_for_quiescent);
        for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
                init_waitqueue_head(&conf->wait_for_stripe[i]);
index 02c3bf8fbfe7aa1b0a1c568605393dc3769c6b18..d05144278690ca9b8cb4ad933d6d23dc36b9b0f8 100644 (file)
@@ -482,7 +482,8 @@ struct r5conf {
         */
        int                     active_name;
        char                    cache_name[2][32];
-       struct kmem_cache               *slab_cache; /* for allocating stripes */
+       struct kmem_cache       *slab_cache; /* for allocating stripes */
+       struct mutex            cache_size_mutex; /* Protect changes to cache size */
 
        int                     seq_flush, seq_write;
        int                     quiesce;
index 4cb365d4ffdcc9c4e12cde82f46d411cb86c7a13..8b95eefb610b4097787a7363c6e43b94ce260efb 100644 (file)
@@ -38,6 +38,8 @@
     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/fb.h>
@@ -1171,6 +1173,13 @@ static int ivtvfb_init_card(struct ivtv *itv)
 {
        int rc;
 
+#ifdef CONFIG_X86_64
+       if (pat_enabled()) {
+               pr_warn("ivtvfb needs PAT disabled, boot with nopat kernel parameter\n");
+               return -ENODEV;
+       }
+#endif
+
        if (itv->osd_info) {
                IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id);
                return -EBUSY;
@@ -1265,12 +1274,6 @@ static int __init ivtvfb_init(void)
        int registered = 0;
        int err;
 
-#ifdef CONFIG_X86_64
-       if (WARN(pat_enabled(),
-                "ivtvfb needs PAT disabled, boot with nopat kernel parameter\n")) {
-               return -ENODEV;
-       }
-#endif
 
        if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) {
                printk(KERN_ERR "ivtvfb:  ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n",
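Moving the pat_enabled() test from module init into ivtvfb_init_card() lets the module itself load on PAT systems; only per-card setup refuses, and via a plain pr_warn() rather than WARN()'s stack trace. The guard shape, as a hedged userspace sketch (not the driver):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical per-card guard: refuse just this device, quietly. */
    static int init_card_demo(bool pat_on)
    {
            if (pat_on) {
                    fprintf(stderr,
                            "ivtvfb needs PAT disabled, boot with nopat kernel parameter\n");
                    return -1;      /* -ENODEV in the driver */
            }
            return 0;
    }

    int main(void)
    {
            return init_card_demo(false);
    }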
index 5c054031c3f86a80a1aee942f395e56efc8f6b36..e14c8c9d189ba7dfbd20d3623df79f847ed3e9f7 100644 (file)
@@ -6,7 +6,7 @@
  *
  * License Terms: GNU General Public License, version 2
  * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
- * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
+ * Author: Viresh Kumar <vireshk@kernel.org> for ST Microelectronics
  */
 
 #include <linux/i2c.h>
index a81badbaa917dac49260ae1e10c7b08ec0b1a524..6fdb30e84a2bb932bfa0b3650f179a962e98b9cc 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) ST Microelectronics SA 2011
  *
  * License Terms: GNU General Public License, version 2
- * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
+ * Author: Viresh Kumar <vireshk@kernel.org> for ST Microelectronics
  */
 
 #include <linux/spi/spi.h>
@@ -146,4 +146,4 @@ module_exit(stmpe_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver");
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
index 8eb0a9500a90626b5605025309b1abf53e2ee7cb..e9513d651cd36b611920a2f6410c905b344fbf13 100644 (file)
@@ -682,7 +682,7 @@ int mei_register(struct mei_device *dev, struct device *parent)
        /* Fill in the data structures */
        devno = MKDEV(MAJOR(mei_devt), dev->minor);
        cdev_init(&dev->cdev, &mei_fops);
-       dev->cdev.owner = mei_fops.owner;
+       dev->cdev.owner = parent->driver->owner;
 
        /* Add the device */
        ret = cdev_add(&dev->cdev, devno, 1);
index 41e3bdb100611ab2f20836d0f8d7e3a16d9b068a..6dfdae3452d609fd07c82cce24f933771a093a70 100644 (file)
@@ -357,7 +357,7 @@ static void scif_p2p_freesg(struct scatterlist *sg)
 }
 
 static struct scatterlist *
-scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt)
+scif_p2p_setsg(phys_addr_t pa, int page_size, int page_cnt)
 {
        struct scatterlist *sg;
        struct page *page;
@@ -368,16 +368,11 @@ scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt)
                return NULL;
        sg_init_table(sg, page_cnt);
        for (i = 0; i < page_cnt; i++) {
-               page = vmalloc_to_page((void __force *)va);
-               if (!page)
-                       goto p2p_sg_err;
+               page = pfn_to_page(pa >> PAGE_SHIFT);
                sg_set_page(&sg[i], page, page_size, 0);
-               va += page_size;
+               pa += page_size;
        }
        return sg;
-p2p_sg_err:
-       kfree(sg);
-       return NULL;
 }
 
 /* Init p2p mappings required to access peerdev from scifdev */
@@ -395,14 +390,14 @@ scif_init_p2p_info(struct scif_dev *scifdev, struct scif_dev *peerdev)
        p2p = kzalloc(sizeof(*p2p), GFP_KERNEL);
        if (!p2p)
                return NULL;
-       p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->va,
+       p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->pa,
                                                    PAGE_SIZE, num_mmio_pages);
        if (!p2p->ppi_sg[SCIF_PPI_MMIO])
                goto free_p2p;
        p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages;
        sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30)));
        num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT);
-       p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->va,
+       p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->pa,
                                                    1 << sg_page_shift,
                                                    num_aper_chunks);
        p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks;
index c9c3d20b784b669bf130cffb716b8525470738fa..a1b820fcb2a6ff60093011d696761ba4ca15e584 100644 (file)
@@ -208,6 +208,8 @@ static ssize_t power_ro_lock_show(struct device *dev,
 
        ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
 
+       mmc_blk_put(md);
+
        return ret;
 }
 
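The fix above is a classic get/put imbalance: power_ro_lock_show() takes a reference via mmc_blk_get() earlier in the function (outside this hunk) and previously returned without dropping it, leaking one reference per sysfs read. The safe shape releases on every exit path once the data has been formatted. A runnable refcount sketch with hypothetical names:

    #include <stdio.h>

    struct blk_data_demo { int refcnt; int ro_lock; };

    static struct blk_data_demo *blk_get(struct blk_data_demo *md) { md->refcnt++; return md; }
    static void blk_put(struct blk_data_demo *md) { md->refcnt--; }

    static int ro_lock_show_demo(struct blk_data_demo *dev, char *buf, int len)
    {
            struct blk_data_demo *md = blk_get(dev);
            int ret = snprintf(buf, len, "%d\n", md->ro_lock);

            blk_put(md);            /* previously missing: leaked one ref per read */
            return ret;
    }

    int main(void)
    {
            struct blk_data_demo d = { .refcnt = 1, .ro_lock = 0 };
            char buf[16];

            ro_lock_show_demo(&d, buf, sizeof(buf));
            printf("%srefcnt back to %d\n", buf, d.refcnt);
            return 0;
    }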
index fd9a58e216a50ffde01e8e9fb7610ef09c417af3..6a0f9c79be2652bdf843c40692e7dd188d76567a 100644 (file)
@@ -779,6 +779,7 @@ config MMC_TOSHIBA_PCI
 
 config MMC_MTK
        tristate "MediaTek SD/MMC Card Interface support"
+       depends on HAS_DMA
        help
          This selects the MediaTek(R) Secure Digital and MultiMedia card Interface.
          If you have a machine with an integrated SD/MMC card reader, say Y or M here.
index b2b411da297b06e73441f8dd51c8bae0b004bcc0..4d120323689043f21c522b44389757f8391d45fc 100644 (file)
@@ -1062,9 +1062,14 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
 
                if (status & (CTO_EN | CCRC_EN))
                        end_cmd = 1;
+               if (host->data || host->response_busy) {
+                       end_trans = !end_cmd;
+                       host->response_busy = 0;
+               }
                if (status & (CTO_EN | DTO_EN))
                        hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
-               else if (status & (CCRC_EN | DCRC_EN))
+               else if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN |
+                                  BADA_EN))
                        hsmmc_command_incomplete(host, -EILSEQ, end_cmd);
 
                if (status & ACE_EN) {
@@ -1081,10 +1086,6 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
                        }
                        dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
                }
-               if (host->data || host->response_busy) {
-                       end_trans = !end_cmd;
-                       host->response_busy = 0;
-               }
        }
 
        OMAP_HSMMC_WRITE(host->base, STAT, status);
index faf0cb910c968abcce26c431422adff3e438f81d..c6b9f6492e1a2529b7f683686bc4d939229dba63 100644 (file)
@@ -581,13 +581,8 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
 static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
 {
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
-       struct pltfm_imx_data *imx_data = pltfm_host->priv;
-       struct esdhc_platform_data *boarddata = &imx_data->boarddata;
 
-       if (boarddata->f_max && (boarddata->f_max < pltfm_host->clock))
-               return boarddata->f_max;
-       else
-               return pltfm_host->clock;
+       return pltfm_host->clock;
 }
 
 static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
@@ -878,34 +873,19 @@ static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
 static int
 sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
                         struct sdhci_host *host,
-                        struct esdhc_platform_data *boarddata)
+                        struct pltfm_imx_data *imx_data)
 {
        struct device_node *np = pdev->dev.of_node;
-
-       if (!np)
-               return -ENODEV;
-
-       if (of_get_property(np, "non-removable", NULL))
-               boarddata->cd_type = ESDHC_CD_PERMANENT;
-
-       if (of_get_property(np, "fsl,cd-controller", NULL))
-               boarddata->cd_type = ESDHC_CD_CONTROLLER;
+       struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+       int ret;
 
        if (of_get_property(np, "fsl,wp-controller", NULL))
                boarddata->wp_type = ESDHC_WP_CONTROLLER;
 
-       boarddata->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
-       if (gpio_is_valid(boarddata->cd_gpio))
-               boarddata->cd_type = ESDHC_CD_GPIO;
-
        boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
        if (gpio_is_valid(boarddata->wp_gpio))
                boarddata->wp_type = ESDHC_WP_GPIO;
 
-       of_property_read_u32(np, "bus-width", &boarddata->max_bus_width);
-
-       of_property_read_u32(np, "max-frequency", &boarddata->f_max);
-
        if (of_find_property(np, "no-1-8-v", NULL))
                boarddata->support_vsel = false;
        else
@@ -916,29 +896,119 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
 
        mmc_of_parse_voltage(np, &host->ocr_mask);
 
+       /* sdr50 and sdr104 need work on 1.8v signal voltage */
+       if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
+           !IS_ERR(imx_data->pins_default)) {
+               imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
+                                               ESDHC_PINCTRL_STATE_100MHZ);
+               imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
+                                               ESDHC_PINCTRL_STATE_200MHZ);
+               if (IS_ERR(imx_data->pins_100mhz) ||
+                               IS_ERR(imx_data->pins_200mhz)) {
+                       dev_warn(mmc_dev(host->mmc),
+                               "could not get ultra high speed state, work on normal mode\n");
+                       /*
+                        * fall back to not supporting UHS by specifying the no-1.8v quirk
+                        */
+                       host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+               }
+       } else {
+               host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+       }
+
        /* call to generic mmc_of_parse to support additional capabilities */
-       return mmc_of_parse(host->mmc);
+       ret = mmc_of_parse(host->mmc);
+       if (ret)
+               return ret;
+
+       if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
+               host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+
+       return 0;
 }
 #else
 static inline int
 sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
                         struct sdhci_host *host,
-                        struct esdhc_platform_data *boarddata)
+                        struct pltfm_imx_data *imx_data)
 {
        return -ENODEV;
 }
 #endif
 
+static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
+                        struct sdhci_host *host,
+                        struct pltfm_imx_data *imx_data)
+{
+       struct esdhc_platform_data *boarddata = &imx_data->boarddata;
+       int err;
+
+       if (!host->mmc->parent->platform_data) {
+               dev_err(mmc_dev(host->mmc), "no board data!\n");
+               return -EINVAL;
+       }
+
+       imx_data->boarddata = *((struct esdhc_platform_data *)
+                               host->mmc->parent->platform_data);
+       /* write_protect */
+       if (boarddata->wp_type == ESDHC_WP_GPIO) {
+               err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
+               if (err) {
+                       dev_err(mmc_dev(host->mmc),
+                               "failed to request write-protect gpio!\n");
+                       return err;
+               }
+               host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+       }
+
+       /* card_detect */
+       switch (boarddata->cd_type) {
+       case ESDHC_CD_GPIO:
+               err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
+               if (err) {
+                       dev_err(mmc_dev(host->mmc),
+                               "failed to request card-detect gpio!\n");
+                       return err;
+               }
+               /* fall through */
+
+       case ESDHC_CD_CONTROLLER:
+               /* we have a working card_detect back */
+               host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+               break;
+
+       case ESDHC_CD_PERMANENT:
+               host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+               break;
+
+       case ESDHC_CD_NONE:
+               break;
+       }
+
+       switch (boarddata->max_bus_width) {
+       case 8:
+               host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
+               break;
+       case 4:
+               host->mmc->caps |= MMC_CAP_4_BIT_DATA;
+               break;
+       case 1:
+       default:
+               host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
+               break;
+       }
+
+       return 0;
+}
+
 static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
 {
        const struct of_device_id *of_id =
                        of_match_device(imx_esdhc_dt_ids, &pdev->dev);
        struct sdhci_pltfm_host *pltfm_host;
        struct sdhci_host *host;
-       struct esdhc_platform_data *boarddata;
        int err;
        struct pltfm_imx_data *imx_data;
-       bool dt = true;
 
        host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0);
        if (IS_ERR(host))
@@ -1030,84 +1100,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
        if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
                host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
 
-       boarddata = &imx_data->boarddata;
-       if (sdhci_esdhc_imx_probe_dt(pdev, host, boarddata) < 0) {
-               if (!host->mmc->parent->platform_data) {
-                       dev_err(mmc_dev(host->mmc), "no board data!\n");
-                       err = -EINVAL;
-                       goto disable_clk;
-               }
-               imx_data->boarddata = *((struct esdhc_platform_data *)
-                                       host->mmc->parent->platform_data);
-               dt = false;
-       }
-       /* write_protect */
-       if (boarddata->wp_type == ESDHC_WP_GPIO && !dt) {
-               err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
-               if (err) {
-                       dev_err(mmc_dev(host->mmc),
-                               "failed to request write-protect gpio!\n");
-                       goto disable_clk;
-               }
-               host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
-       }
-
-       /* card_detect */
-       switch (boarddata->cd_type) {
-       case ESDHC_CD_GPIO:
-               if (dt)
-                       break;
-               err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
-               if (err) {
-                       dev_err(mmc_dev(host->mmc),
-                               "failed to request card-detect gpio!\n");
-                       goto disable_clk;
-               }
-               /* fall through */
-
-       case ESDHC_CD_CONTROLLER:
-               /* we have a working card_detect back */
-               host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
-               break;
-
-       case ESDHC_CD_PERMANENT:
-               host->mmc->caps |= MMC_CAP_NONREMOVABLE;
-               break;
-
-       case ESDHC_CD_NONE:
-               break;
-       }
-
-       switch (boarddata->max_bus_width) {
-       case 8:
-               host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
-               break;
-       case 4:
-               host->mmc->caps |= MMC_CAP_4_BIT_DATA;
-               break;
-       case 1:
-       default:
-               host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
-               break;
-       }
-
-       /* sdr50 and sdr104 needs work on 1.8v signal voltage */
-       if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
-           !IS_ERR(imx_data->pins_default)) {
-               imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
-                                               ESDHC_PINCTRL_STATE_100MHZ);
-               imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
-                                               ESDHC_PINCTRL_STATE_200MHZ);
-               if (IS_ERR(imx_data->pins_100mhz) ||
-                               IS_ERR(imx_data->pins_200mhz)) {
-                       dev_warn(mmc_dev(host->mmc),
-                               "could not get ultra high speed state, work on normal mode\n");
-                       /* fall back to not support uhs by specify no 1.8v quirk */
-                       host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
-               }
-       } else {
-               host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
-       }
+       if (of_id)
+               err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
+       else
+               err = sdhci_esdhc_imx_probe_nondt(pdev, host, imx_data);
+       if (err)
+               goto disable_clk;
 
        err = sdhci_add_host(host);
        if (err)
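After this refactor the probe body is a single two-way dispatch: board description comes either from the matched device-tree node or from legacy platform data, each handled by its own helper, instead of one shared body full of "if (!dt)" guards. Roughly, as a hypothetical userspace sketch of the structure:

    #include <stdio.h>

    static int probe_dt_demo(void)    { puts("config from device tree");   return 0; }
    static int probe_nondt_demo(void) { puts("config from platform data"); return 0; }

    /* One probe per configuration source; no shared state flag needed. */
    static int probe_demo(int have_of_node)
    {
            return have_of_node ? probe_dt_demo() : probe_nondt_demo();
    }

    int main(void) { return probe_demo(1); }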
index 3497cfaf683c539edbd062199e03f5692e43bb26..a870c42731d7a4eea86b39e9e965d94a0603631d 100644 (file)
@@ -45,6 +45,6 @@
 #define ESDHC_DMA_SYSCTL       0x40c
 #define ESDHC_DMA_SNOOP                0x00000040
 
-#define ESDHC_HOST_CONTROL_RES 0x05
+#define ESDHC_HOST_CONTROL_RES 0x01
 
 #endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
index 9cd5fc62f130871aaa6390ac8e25b0f0f2fedf03..946d37f94a31b29e8739304ec71cf7b1468eead6 100644 (file)
@@ -411,6 +411,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
                        goto err_of_parse;
                sdhci_get_of_property(pdev);
                pdata = pxav3_get_mmc_pdata(dev);
+               pdev->dev.platform_data = pdata;
        } else if (pdata) {
                /* on-chip device */
                if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
index df088343d60f32413a3694813ad97f9abb57b7ae..255a896769b8691ee9178fb5502e3844ccd1555f 100644 (file)
@@ -4,7 +4,7 @@
  * Support of SDHCI platform devices for spear soc family
  *
  * Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * Inspired by sdhci-pltfm.c
  *
@@ -211,5 +211,5 @@ static struct platform_driver sdhci_driver = {
 module_platform_driver(sdhci_driver);
 
 MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
 MODULE_LICENSE("GPL v2");
index bc1445238fb3053b8d2a03a7bb35a25ddfff4b61..1dbe932320309fc87f75a40cc2552ba5a784d5bd 100644 (file)
@@ -2866,6 +2866,7 @@ int sdhci_add_host(struct sdhci_host *host)
        u32 max_current_caps;
        unsigned int ocr_avail;
        unsigned int override_timeout_clk;
+       u32 max_clk;
        int ret;
 
        WARN_ON(host == NULL);
@@ -2978,8 +2979,11 @@ int sdhci_add_host(struct sdhci_host *host)
                                                      GFP_KERNEL);
                host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
                if (!host->adma_table || !host->align_buffer) {
-                       dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
-                                         host->adma_table, host->adma_addr);
+                       if (host->adma_table)
+                               dma_free_coherent(mmc_dev(mmc),
+                                                 host->adma_table_sz,
+                                                 host->adma_table,
+                                                 host->adma_addr);
                        kfree(host->align_buffer);
                        pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
                                mmc_hostname(mmc));
@@ -3047,18 +3051,22 @@ int sdhci_add_host(struct sdhci_host *host)
         * Set host parameters.
         */
        mmc->ops = &sdhci_ops;
-       mmc->f_max = host->max_clk;
+       max_clk = host->max_clk;
+
        if (host->ops->get_min_clock)
                mmc->f_min = host->ops->get_min_clock(host);
        else if (host->version >= SDHCI_SPEC_300) {
                if (host->clk_mul) {
                        mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
-                       mmc->f_max = host->max_clk * host->clk_mul;
+                       max_clk = host->max_clk * host->clk_mul;
                } else
                        mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
        } else
                mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
 
+       if (!mmc->f_max || mmc->f_max > max_clk)
+               mmc->f_max = max_clk;
+
        if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
                host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
                                        SDHCI_TIMEOUT_CLK_SHIFT;
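The intent of the new max_clk handling: honour an f_max that came from DT/caps parsing, but never advertise more than the controller (possibly multiplied by clk_mul) can produce; a zero f_max means "unset" and falls back to max_clk. As a small runnable sketch of the clamp:

    #include <stdio.h>

    /* Keep a user/DT-provided maximum only if nonzero and achievable. */
    static unsigned int clamp_f_max(unsigned int f_max, unsigned int max_clk)
    {
            if (!f_max || f_max > max_clk)
                    return max_clk;
            return f_max;
    }

    int main(void)
    {
            printf("%u\n", clamp_f_max(0, 52000000));         /* 52000000 */
            printf("%u\n", clamp_f_max(25000000, 52000000));  /* 25000000 */
            return 0;
    }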
index 7fde4d5c2b28beca9c645a498fb3aefc467fbe26..3c45358844eb94bc8d44c3934e0a5689b9adf994 100644 (file)
@@ -1870,8 +1870,6 @@ static void ad_marker_info_received(struct bond_marker *marker_info,
 static void ad_marker_response_received(struct bond_marker *marker,
                                        struct port *port)
 {
-       marker = NULL;
-       port = NULL;
        /* DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW */
 }
 
index 317a49480475d2c87fb5a4aa56716844ce21005a..0c627b4733ca56b026e15bf9527330539fd64cd1 100644 (file)
@@ -625,6 +625,23 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
        call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
 }
 
+static struct slave *bond_get_old_active(struct bonding *bond,
+                                        struct slave *new_active)
+{
+       struct slave *slave;
+       struct list_head *iter;
+
+       bond_for_each_slave(bond, slave, iter) {
+               if (slave == new_active)
+                       continue;
+
+               if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
+                       return slave;
+       }
+
+       return NULL;
+}
+
 /* bond_do_fail_over_mac
  *
  * Perform special MAC address swapping for fail_over_mac settings
@@ -652,6 +669,9 @@ static void bond_do_fail_over_mac(struct bonding *bond,
                if (!new_active)
                        return;
 
+               if (!old_active)
+                       old_active = bond_get_old_active(bond, new_active);
+
                if (old_active) {
                        ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
                        ether_addr_copy(saddr.sa_data,
@@ -1725,9 +1745,16 @@ err_free:
 
 err_undo_flags:
        /* Enslave of first slave has failed and we need to fix master's mac */
-       if (!bond_has_slaves(bond) &&
-           ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr))
-               eth_hw_addr_random(bond_dev);
+       if (!bond_has_slaves(bond)) {
+               if (ether_addr_equal_64bits(bond_dev->dev_addr,
+                                           slave_dev->dev_addr))
+                       eth_hw_addr_random(bond_dev);
+               if (bond_dev->type != ARPHRD_ETHER) {
+                       ether_setup(bond_dev);
+                       bond_dev->flags |= IFF_MASTER;
+                       bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+               }
+       }
 
        return res;
 }
@@ -1916,6 +1943,7 @@ static int  bond_release_and_destroy(struct net_device *bond_dev,
                bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
                netdev_info(bond_dev, "Destroying bond %s\n",
                            bond_dev->name);
+               bond_remove_proc_entry(bond);
                unregister_netdevice(bond_dev);
        }
        return ret;
@@ -3751,7 +3779,6 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
        struct slave *slave;
        struct list_head *iter;
        struct bond_up_slave *new_arr, *old_arr;
-       int slaves_in_agg;
        int agg_id = 0;
        int ret = 0;
 
@@ -3782,7 +3809,6 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
                        }
                        goto out;
                }
-               slaves_in_agg = ad_info.ports;
                agg_id = ad_info.aggregator_id;
        }
        bond_for_each_slave(bond, slave, iter) {
index 1bda29249d12254ddbdd2e35572597464a9380c2..db760e84119fcb970b7b34f7c4fac92b1acfed52 100644 (file)
@@ -111,6 +111,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
        [IFLA_BOND_AD_USER_PORT_KEY]    = { .type = NLA_U16 },
        [IFLA_BOND_AD_ACTOR_SYSTEM]     = { .type = NLA_BINARY,
                                            .len  = ETH_ALEN },
+       [IFLA_BOND_TLB_DYNAMIC_LB]      = { .type = NLA_U8 },
 };
 
 static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
@@ -405,7 +406,6 @@ static int bond_changelink(struct net_device *bond_dev,
                if (err)
                        return err;
        }
-
        if (data[IFLA_BOND_AD_USER_PORT_KEY]) {
                int port_key =
                        nla_get_u16(data[IFLA_BOND_AD_USER_PORT_KEY]);
@@ -415,7 +415,6 @@ static int bond_changelink(struct net_device *bond_dev,
                if (err)
                        return err;
        }
-
        if (data[IFLA_BOND_AD_ACTOR_SYSTEM]) {
                if (nla_len(data[IFLA_BOND_AD_ACTOR_SYSTEM]) != ETH_ALEN)
                        return -EINVAL;
@@ -426,6 +425,15 @@ static int bond_changelink(struct net_device *bond_dev,
                if (err)
                        return err;
        }
+       if (data[IFLA_BOND_TLB_DYNAMIC_LB]) {
+               int dynamic_lb = nla_get_u8(data[IFLA_BOND_TLB_DYNAMIC_LB]);
+
+               bond_opt_initval(&newval, dynamic_lb);
+               err = __bond_opt_set(bond, BOND_OPT_TLB_DYNAMIC_LB, &newval);
+               if (err)
+                       return err;
+       }
+
        return 0;
 }
 
@@ -476,6 +484,7 @@ static size_t bond_get_size(const struct net_device *bond_dev)
                nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_ACTOR_SYS_PRIO */
                nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_USER_PORT_KEY */
                nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_ACTOR_SYSTEM */
+               nla_total_size(sizeof(u8)) + /* IFLA_BOND_TLB_DYNAMIC_LB */
                0;
 }
 
@@ -598,6 +607,10 @@ static int bond_fill_info(struct sk_buff *skb,
                       bond->params.ad_select))
                goto nla_put_failure;
 
+       if (nla_put_u8(skb, IFLA_BOND_TLB_DYNAMIC_LB,
+                      bond->params.tlb_dynamic_lb))
+               goto nla_put_failure;
+
        if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                struct ad_info info;
 
index e9c624d54dd4cdf869d1cf04859dd0010c7c7f21..6dda57e2e724f575490248cb504120fe7e2ca600 100644 (file)
@@ -420,6 +420,13 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
                .flags = BOND_OPTFLAG_IFDOWN,
                .values = bond_ad_user_port_key_tbl,
                .set = bond_option_ad_user_port_key_set,
+       },
+       [BOND_OPT_NUM_PEER_NOTIF_ALIAS] = {
+               .id = BOND_OPT_NUM_PEER_NOTIF_ALIAS,
+               .name = "num_grat_arp",
+               .desc = "Number of peer notifications to send on failover event",
+               .values = bond_num_peer_notif_tbl,
+               .set = bond_option_num_peer_notif_set
        }
 };
 
index 31835a4dab5784ed11984fadbacd7f5c76fbcc8b..f4ae720862158354d028169de77cafe782f539fa 100644 (file)
@@ -380,7 +380,7 @@ static ssize_t bonding_show_ad_select(struct device *d,
 static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
                   bonding_show_ad_select, bonding_sysfs_store_option);
 
-/* Show and set the number of peer notifications to send after a failover event. */
+/* Show the number of peer notifications to send after a failover event. */
 static ssize_t bonding_show_num_peer_notif(struct device *d,
                                           struct device_attribute *attr,
                                           char *buf)
@@ -388,24 +388,10 @@ static ssize_t bonding_show_num_peer_notif(struct device *d,
        struct bonding *bond = to_bond(d);
        return sprintf(buf, "%d\n", bond->params.num_peer_notif);
 }
-
-static ssize_t bonding_store_num_peer_notif(struct device *d,
-                                           struct device_attribute *attr,
-                                           const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_NUM_PEER_NOTIF, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
-                  bonding_show_num_peer_notif, bonding_store_num_peer_notif);
+                  bonding_show_num_peer_notif, bonding_sysfs_store_option);
 static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR,
-                  bonding_show_num_peer_notif, bonding_store_num_peer_notif);
+                  bonding_show_num_peer_notif, bonding_sysfs_store_option);
 
 /* Show the MII monitor interval. */
 static ssize_t bonding_show_miimon(struct device *d,
index f4e40aa4d2a21db2be91eb39f46d1e410ac6c0e0..945c0955a9675198a8b0945ddc49dd6668380838 100644 (file)
@@ -577,10 +577,10 @@ static void at91_rx_overflow_err(struct net_device *dev)
 
        cf->can_id |= CAN_ERR_CRTL;
        cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
-       netif_receive_skb(skb);
 
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_receive_skb(skb);
 }
 
 /**
@@ -642,10 +642,10 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
        }
 
        at91_read_mb(dev, mb, cf);
-       netif_receive_skb(skb);
 
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_receive_skb(skb);
 
        can_led_event(dev, CAN_LED_EVENT_RX);
 }
@@ -802,10 +802,10 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
                return 0;
 
        at91_poll_err_frame(dev, cf, reg_sr);
-       netif_receive_skb(skb);
 
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += cf->can_dlc;
+       netif_receive_skb(skb);
 
        return 1;
 }
@@ -1067,10 +1067,10 @@ static void at91_irq_err(struct net_device *dev)
                return;
 
        at91_irq_err_state(dev, cf, new_state);
-       netif_rx(skb);
 
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 
        priv->can.state = new_state;
 }
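This reordering, repeated in the bfin_can, cc770, flexcan, grcan, sja1000, slcan, ti_hecc and USB adapter hunks below, exists because netif_rx()/netif_receive_skb() hand the skb to the network stack, which may consume or free it; reading cf->can_dlc through the skb afterwards is a potential use-after-free. Read everything you need from the frame first, then push the skb. In miniature (hypothetical stand-ins, not the drivers):

    #include <stdio.h>
    #include <stdlib.h>

    struct can_frame_demo { unsigned char can_dlc; };
    struct stats_demo { unsigned long rx_packets, rx_bytes; };

    /* Stand-in for netif_rx(): ownership transfers; the buffer may be freed. */
    static void netif_rx_demo(struct can_frame_demo *cf) { free(cf); }

    static void rx_demo(struct stats_demo *stats)
    {
            struct can_frame_demo *cf = malloc(sizeof(*cf));

            if (!cf)
                    return;
            cf->can_dlc = 8;

            stats->rx_packets++;
            stats->rx_bytes += cf->can_dlc;  /* read BEFORE handing the skb off */
            netif_rx_demo(cf);               /* cf must not be touched after this */
    }

    int main(void)
    {
            struct stats_demo s = { 0, 0 };

            rx_demo(&s);
            printf("%lu packets, %lu bytes\n", s.rx_packets, s.rx_bytes);
            return 0;
    }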
index 27ad312e7abf34bdf94a86ce66fa5ae900da1aed..57dadd52b428a536d71f2364c006641e5e765083 100644 (file)
@@ -424,10 +424,9 @@ static void bfin_can_rx(struct net_device *dev, u16 isrc)
                cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
        }
 
-       netif_rx(skb);
-
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 }
 
 static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
@@ -508,10 +507,9 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
 
        priv->can.state = state;
 
-       netif_rx(skb);
-
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 
        return 0;
 }
index c11d4498403617e4dddcea42d085e08355c9a5d2..70a8cbb29e75844a02bea49a937bf0c05a200910 100644 (file)
@@ -504,10 +504,10 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
                for (i = 0; i < cf->can_dlc; i++)
                        cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]);
        }
-       netif_rx(skb);
 
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 }
 
 static int cc770_err(struct net_device *dev, u8 status)
@@ -584,10 +584,10 @@ static int cc770_err(struct net_device *dev, u8 status)
                }
        }
 
-       netif_rx(skb);
 
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 
        return 0;
 }
index 6201c5a1a8845f2e3e68f3921a2fcae8e4469217..b1e8d729851cbb5173c1bbec2b34c4893b2ff595 100644 (file)
@@ -577,10 +577,10 @@ static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr)
                return 0;
 
        do_bus_err(dev, cf, reg_esr);
-       netif_receive_skb(skb);
 
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += cf->can_dlc;
+       netif_receive_skb(skb);
 
        return 1;
 }
@@ -622,10 +622,9 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
        if (unlikely(new_state == CAN_STATE_BUS_OFF))
                can_bus_off(dev);
 
-       netif_receive_skb(skb);
-
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += cf->can_dlc;
+       netif_receive_skb(skb);
 
        return 1;
 }
@@ -670,10 +669,10 @@ static int flexcan_read_frame(struct net_device *dev)
        }
 
        flexcan_read_fifo(dev, cf);
-       netif_receive_skb(skb);
 
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_receive_skb(skb);
 
        can_led_event(dev, CAN_LED_EVENT_RX);
 
index e3d7e22a4fa080504544a245d9e21adb80aea036..db9538d4b3586e7357ae8d46e4db9f25a27cd469 100644 (file)
@@ -1216,11 +1216,12 @@ static int grcan_receive(struct net_device *dev, int budget)
                                cf->data[i] = (u8)(slot[j] >> shift);
                        }
                }
-               netif_receive_skb(skb);
 
                /* Update statistics and read pointer */
                stats->rx_packets++;
                stats->rx_bytes += cf->can_dlc;
+               netif_receive_skb(skb);
+
                rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size);
        }
 
index 32bd7f451aa42b53f0a479af667861db6693d619..7b92e911a6168badb3e30f8fc55b2e6fdd0f61dc 100644 (file)
@@ -377,10 +377,9 @@ static void sja1000_rx(struct net_device *dev)
        /* release receive buffer */
        sja1000_write_cmdreg(priv, CMD_RRB);
 
-       netif_rx(skb);
-
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 
        can_led_event(dev, CAN_LED_EVENT_RX);
 }
@@ -484,10 +483,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
                        can_bus_off(dev);
        }
 
-       netif_rx(skb);
-
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 
        return 0;
 }
index a23a7af8eb9a0ccfdffc39518ee9ee7cd43485d1..9a3f15cb7ef4883abebe6bac8ffc5909077d475f 100644 (file)
@@ -218,10 +218,10 @@ static void slc_bump(struct slcan *sl)
 
        memcpy(skb_put(skb, sizeof(struct can_frame)),
               &cf, sizeof(struct can_frame));
-       netif_rx_ni(skb);
 
        sl->dev->stats.rx_packets++;
        sl->dev->stats.rx_bytes += cf.can_dlc;
+       netif_rx_ni(skb);
 }
 
 /* parse tty input stream */
index c1a95a34d62ea682d8a2582ddc068ca52c7e592d..b7e83c2120235e133141422ebe037a3bf43e7a15 100644 (file)
@@ -1086,8 +1086,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
        if (ret)
                goto out_clk;
 
-       priv->power = devm_regulator_get(&spi->dev, "vdd");
-       priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
+       priv->power = devm_regulator_get_optional(&spi->dev, "vdd");
+       priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
        if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
            (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
                ret = -EPROBE_DEFER;
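devm_regulator_get_optional() returns an error pointer when the supply simply is not described, instead of the dummy regulator that devm_regulator_get() fabricates, so the driver can tell "absent" apart from "not ready yet" (-EPROBE_DEFER); only the latter aborts the probe. A userspace analogue of that decision (error values stand in for ERR_PTRs):

    #include <errno.h>
    #include <stdio.h>

    #define EPROBE_DEFER 517   /* kernel-internal errno used during probing */

    /* Stand-in for devm_regulator_get_optional(): -ENODEV means the board
     * has no such supply; -EPROBE_DEFER means its provider isn't up yet. */
    static int get_optional_demo(int present, int ready)
    {
            if (!present)
                    return -ENODEV;
            return ready ? 0 : -EPROBE_DEFER;
    }

    int main(void)
    {
            int power = get_optional_demo(1, 0);
            int xceiver = get_optional_demo(0, 1);

            if (power == -EPROBE_DEFER || xceiver == -EPROBE_DEFER)
                    puts("defer probe, try again later");
            else
                    puts("continue without the missing supplies");
            return 0;
    }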
@@ -1222,17 +1222,16 @@ static int __maybe_unused mcp251x_can_resume(struct device *dev)
        struct spi_device *spi = to_spi_device(dev);
        struct mcp251x_priv *priv = spi_get_drvdata(spi);
 
-       if (priv->after_suspend & AFTER_SUSPEND_POWER) {
+       if (priv->after_suspend & AFTER_SUSPEND_POWER)
                mcp251x_power_enable(priv->power, 1);
+
+       if (priv->after_suspend & AFTER_SUSPEND_UP) {
+               mcp251x_power_enable(priv->transceiver, 1);
                queue_work(priv->wq, &priv->restart_work);
        } else {
-               if (priv->after_suspend & AFTER_SUSPEND_UP) {
-                       mcp251x_power_enable(priv->transceiver, 1);
-                       queue_work(priv->wq, &priv->restart_work);
-               } else {
-                       priv->after_suspend = 0;
-               }
+               priv->after_suspend = 0;
        }
+
        priv->force_quit = 0;
        enable_irq(spi->irq);
        return 0;
index e95a9e1a889f19c4673d9735e5e932eb83340767..cf345cbfe8198ef23ee328fc2eb67f5841751ee1 100644 (file)
@@ -747,9 +747,9 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
                }
        }
 
-       netif_rx(skb);
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 
        return 0;
 }
index 866bac0ae7e966855d1085f5a8735988832a1bb7..2d390384ef3bb3d3845fcf6102bef70715f1dd21 100644 (file)
@@ -324,10 +324,9 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
                        cf->data[i] = msg->msg.can_msg.msg[i];
        }
 
-       netif_rx(skb);
-
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 }
 
 static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
@@ -400,10 +399,9 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
                stats->rx_errors++;
        }
 
-       netif_rx(skb);
-
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 }
 
 /*
index 411c1af92c62c1c93985e4adf7232d1b6b78d8bc..0e5a4493ba4fee6d3c4fb5626a676f18802a6ef3 100644 (file)
@@ -301,13 +301,12 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
                        cf->data[7] = rxerr;
                }
 
-               netif_rx(skb);
-
                priv->bec.txerr = txerr;
                priv->bec.rxerr = rxerr;
 
                stats->rx_packets++;
                stats->rx_bytes += cf->can_dlc;
+               netif_rx(skb);
        }
 }
 
@@ -347,10 +346,9 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
                                cf->data[i] = msg->msg.rx.data[i];
                }
 
-               netif_rx(skb);
-
                stats->rx_packets++;
                stats->rx_bytes += cf->can_dlc;
+               netif_rx(skb);
        }
 
        return;
index 72427f21edffaa9fed0874085e277aa4c83783b7..6b94007ae05221c94d3dedce77a412e732834337 100644 (file)
@@ -526,9 +526,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
                hwts->hwtstamp = timeval_to_ktime(tv);
        }
 
-       netif_rx(skb);
        mc->netdev->stats.rx_packets++;
        mc->netdev->stats.rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 
        return 0;
 }
@@ -659,12 +659,11 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
        hwts = skb_hwtstamps(skb);
        hwts->hwtstamp = timeval_to_ktime(tv);
 
-       /* push the skb */
-       netif_rx(skb);
-
        /* update statistics */
        mc->netdev->stats.rx_packets++;
        mc->netdev->stats.rx_bytes += cf->can_dlc;
+       /* push the skb */
+       netif_rx(skb);
 
        return 0;
 
index dec51717635e900aafc91b771e5bfa0a2b594fae..7d61b3279798b936f4a3238afb57c73437ba22f2 100644 (file)
@@ -553,9 +553,9 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
        hwts = skb_hwtstamps(skb);
        hwts->hwtstamp = timeval_to_ktime(tv);
 
-       netif_rx(skb);
        netdev->stats.rx_packets++;
        netdev->stats.rx_bytes += can_frame->can_dlc;
+       netif_rx(skb);
 
        return 0;
 }
@@ -670,9 +670,9 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
        peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv);
        hwts = skb_hwtstamps(skb);
        hwts->hwtstamp = timeval_to_ktime(tv);
-       netif_rx(skb);
        netdev->stats.rx_packets++;
        netdev->stats.rx_bytes += can_frame->can_dlc;
+       netif_rx(skb);
 
        return 0;
 }
index dd52c7a4c80d9f26faa74ece8d109ee21045ba47..de95b1ccba3e3b6d4d00e313acb280cd178a000d 100644 (file)
@@ -461,10 +461,9 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
        priv->bec.txerr = txerr;
        priv->bec.rxerr = rxerr;
 
-       netif_rx(skb);
-
        stats->rx_packets++;
        stats->rx_bytes += cf->can_dlc;
+       netif_rx(skb);
 }
 
 /* Read data and status frames */
@@ -494,10 +493,9 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv,
                else
                        memcpy(cf->data, msg->data, cf->can_dlc);
 
-               netif_rx(skb);
-
                stats->rx_packets++;
                stats->rx_bytes += cf->can_dlc;
+               netif_rx(skb);
 
                can_led_event(priv->netdev, CAN_LED_EVENT_RX);
        } else {
index 7ad0a4d8e475f519b0f1a6618091c3c93cb60427..4c483d937481777025e4ab7e8d23ce75316d6f17 100644 (file)
@@ -46,13 +46,13 @@ config NET_DSA_MV88E6171
          ethernet switches chips.
 
 config NET_DSA_MV88E6352
-       tristate "Marvell 88E6172/88E6176/88E6352 ethernet switch chip support"
+       tristate "Marvell 88E6172/6176/6320/6321/6352 ethernet switch chip support"
        depends on NET_DSA
        select NET_DSA_MV88E6XXX
        select NET_DSA_TAG_EDSA
        ---help---
-         This enables support for the Marvell 88E6172, 88E6176 and 88E6352
-         ethernet switch chips.
+         This enables support for the Marvell 88E6172, 88E6176, 88E6320,
+         88E6321 and 88E6352 ethernet switch chips.
 
 config NET_DSA_BCM_SF2
        tristate "Broadcom Starfighter 2 Ethernet switch support"
index 972982f8bea7af16f253b58d3540cc0b9aa6682b..289e20443d83a3507f2700afc91f395828f45149 100644 (file)
@@ -696,9 +696,20 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
        }
 
        /* Include the pseudo-PHY address and the broadcast PHY address to
-        * divert reads towards our workaround
+        * divert reads towards our workaround. This is only required for
+        * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
+        * that we can use the regular SWITCH_MDIO master controller instead.
+        *
+        * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask
+        * to have a 1:1 mapping between Port address and PHY address in order
+        * to utilize the slave_mii_bus instance to read from Port PHYs. This is
+        * not what we want here, so we initialize phys_mii_mask to 0 to always
+        * utilize the "master" MDIO bus backed by the "mdio-unimac" driver.
         */
-       ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
+       if (of_machine_is_compatible("brcm,bcm7445d0"))
+               ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
+       else
+               ds->phys_mii_mask = 0;
 
        rev = reg_readl(priv, REG_SWITCH_REVISION);
        priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
@@ -890,15 +901,11 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
                                         struct fixed_phy_status *status)
 {
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
-       u32 duplex, pause, speed;
+       u32 duplex, pause;
        u32 reg;
 
        duplex = core_readl(priv, CORE_DUPSTS);
        pause = core_readl(priv, CORE_PAUSESTS);
-       speed = core_readl(priv, CORE_SPDSTS);
-
-       speed >>= (port * SPDSTS_SHIFT);
-       speed &= SPDSTS_MASK;
 
        status->link = 0;
 
@@ -933,18 +940,6 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
                reg &= ~LINK_STS;
        core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
 
-       switch (speed) {
-       case SPDSTS_10:
-               status->speed = SPEED_10;
-               break;
-       case SPDSTS_100:
-               status->speed = SPEED_100;
-               break;
-       case SPDSTS_1000:
-               status->speed = SPEED_1000;
-               break;
-       }
-
        if ((pause & (1 << port)) &&
            (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
                status->asym_pause = 1;
index 632815c10a401f7bd873e077a262528b73ceed7d..af210efecc554546a762073eb0121823b9337dcb 100644 (file)
@@ -36,6 +36,18 @@ static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
                        return "Marvell 88E6172";
                if ((ret & 0xfff0) == PORT_SWITCH_ID_6176)
                        return "Marvell 88E6176";
+               if (ret == PORT_SWITCH_ID_6320_A1)
+                       return "Marvell 88E6320 (A1)";
+               if (ret == PORT_SWITCH_ID_6320_A2)
+                       return "Marvell 88E6320 (A2)";
+               if ((ret & 0xfff0) == PORT_SWITCH_ID_6320)
+                       return "Marvell 88E6320";
+               if (ret == PORT_SWITCH_ID_6321_A1)
+                       return "Marvell 88E6321 (A1)";
+               if (ret == PORT_SWITCH_ID_6321_A2)
+                       return "Marvell 88E6321 (A2)";
+               if ((ret & 0xfff0) == PORT_SWITCH_ID_6321)
+                       return "Marvell 88E6321";
                if (ret == PORT_SWITCH_ID_6352_A0)
                        return "Marvell 88E6352 (A0)";
                if (ret == PORT_SWITCH_ID_6352_A1)
@@ -80,66 +92,6 @@ static int mv88e6352_setup_global(struct dsa_switch *ds)
        return 0;
 }
 
-#ifdef CONFIG_NET_DSA_HWMON
-
-static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp)
-{
-       int ret;
-
-       *temp = 0;
-
-       ret = mv88e6xxx_phy_page_read(ds, 0, 6, 27);
-       if (ret < 0)
-               return ret;
-
-       *temp = (ret & 0xff) - 25;
-
-       return 0;
-}
-
-static int mv88e6352_get_temp_limit(struct dsa_switch *ds, int *temp)
-{
-       int ret;
-
-       *temp = 0;
-
-       ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
-       if (ret < 0)
-               return ret;
-
-       *temp = (((ret >> 8) & 0x1f) * 5) - 25;
-
-       return 0;
-}
-
-static int mv88e6352_set_temp_limit(struct dsa_switch *ds, int temp)
-{
-       int ret;
-
-       ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
-       if (ret < 0)
-               return ret;
-       temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
-       return mv88e6xxx_phy_page_write(ds, 0, 6, 26,
-                                       (ret & 0xe0ff) | (temp << 8));
-}
-
-static int mv88e6352_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
-{
-       int ret;
-
-       *alarm = false;
-
-       ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
-       if (ret < 0)
-               return ret;
-
-       *alarm = !!(ret & 0x40);
-
-       return 0;
-}
-#endif /* CONFIG_NET_DSA_HWMON */
-
 static int mv88e6352_setup(struct dsa_switch *ds)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -377,10 +329,10 @@ struct dsa_switch_driver mv88e6352_switch_driver = {
        .set_eee                = mv88e6xxx_set_eee,
        .get_eee                = mv88e6xxx_get_eee,
 #ifdef CONFIG_NET_DSA_HWMON
-       .get_temp               = mv88e6352_get_temp,
-       .get_temp_limit         = mv88e6352_get_temp_limit,
-       .set_temp_limit         = mv88e6352_set_temp_limit,
-       .get_temp_alarm         = mv88e6352_get_temp_alarm,
+       .get_temp               = mv88e6xxx_get_temp,
+       .get_temp_limit         = mv88e6xxx_get_temp_limit,
+       .set_temp_limit         = mv88e6xxx_set_temp_limit,
+       .get_temp_alarm         = mv88e6xxx_get_temp_alarm,
 #endif
        .get_eeprom             = mv88e6352_get_eeprom,
        .set_eeprom             = mv88e6352_set_eeprom,
@@ -394,5 +346,8 @@ struct dsa_switch_driver mv88e6352_switch_driver = {
        .fdb_getnext            = mv88e6xxx_port_fdb_getnext,
 };
 
-MODULE_ALIAS("platform:mv88e6352");
 MODULE_ALIAS("platform:mv88e6172");
+MODULE_ALIAS("platform:mv88e6176");
+MODULE_ALIAS("platform:mv88e6320");
+MODULE_ALIAS("platform:mv88e6321");
+MODULE_ALIAS("platform:mv88e6352");
index fd8547c2b79d46786b10807a0c62f338b6a60e27..109452056eff9fc68074bb82b9ccdf8f06adf7de 100644 (file)
@@ -517,6 +517,18 @@ static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
        return false;
 }
 
+static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       switch (ps->id) {
+       case PORT_SWITCH_ID_6320:
+       case PORT_SWITCH_ID_6321:
+               return true;
+       }
+       return false;
+}
+
 static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -565,7 +577,7 @@ static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
 {
        int ret;
 
-       if (mv88e6xxx_6352_family(ds))
+       if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
                port = (port + 1) << 5;
 
        /* Snapshot the hardware statistics counters for this port. */
@@ -796,54 +808,6 @@ void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
        }
 }
 
-#ifdef CONFIG_NET_DSA_HWMON
-
-int  mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int ret;
-       int val;
-
-       *temp = 0;
-
-       mutex_lock(&ps->smi_mutex);
-
-       ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
-       if (ret < 0)
-               goto error;
-
-       /* Enable temperature sensor */
-       ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
-       if (ret < 0)
-               goto error;
-
-       ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
-       if (ret < 0)
-               goto error;
-
-       /* Wait for temperature to stabilize */
-       usleep_range(10000, 12000);
-
-       val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
-       if (val < 0) {
-               ret = val;
-               goto error;
-       }
-
-       /* Disable temperature sensor */
-       ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
-       if (ret < 0)
-               goto error;
-
-       *temp = ((val & 0x1f) - 5) * 5;
-
-error:
-       _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
-       mutex_unlock(&ps->smi_mutex);
-       return ret;
-}
-#endif /* CONFIG_NET_DSA_HWMON */
-
 /* Must be called with SMI lock held */
 static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
                           u16 mask)
@@ -1163,7 +1127,7 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
 
        newfid = __ffs(ps->fid_mask);
        ps->fid[port] = newfid;
-       ps->fid_mask &= (1 << newfid);
+       ps->fid_mask &= ~(1 << newfid);
        ps->bridge_mask[fid] &= ~(1 << port);
        ps->bridge_mask[newfid] = 1 << port;
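
The one-character change above is the real fix in this hunk: assuming fid_mask tracks free FIDs, as the __ffs() allocation implies, the old `&= (1 << newfid)` wiped every free bit except the one just taken instead of clearing it. The allocate-from-bitmask idiom in plain C:

#include <stdio.h>

int main(void)
{
        unsigned long fid_mask = 0xfe;           /* set bits = free FIDs */
        int newfid = __builtin_ctzl(fid_mask);   /* lowest set bit, like __ffs() */

        fid_mask &= ~(1UL << newfid);            /* clear it: FID now in use */
        printf("took fid %d, free mask now %#lx\n", newfid, fid_mask);

        return 0;
}
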
 
@@ -1377,7 +1341,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
        if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
            mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
            mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
-           mv88e6xxx_6065_family(ds)) {
+           mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
                /* MAC Forcing register: don't force link, speed,
                 * duplex or flow control state to any particular
                 * values on physical ports, but force the CPU port
@@ -1423,7 +1387,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
        if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
            mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
            mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
-           mv88e6xxx_6185_family(ds))
+           mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
                reg = PORT_CONTROL_IGMP_MLD_SNOOP |
                PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
                PORT_CONTROL_STATE_FORWARDING;
@@ -1431,7 +1395,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
                if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
                        reg |= PORT_CONTROL_DSA_TAG;
                if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-                   mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+                   mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+                   mv88e6xxx_6320_family(ds)) {
                        if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
                                reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
                        else
@@ -1441,14 +1406,15 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
                if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
                    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
                    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
-                   mv88e6xxx_6185_family(ds)) {
+                   mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
                        if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
                                reg |= PORT_CONTROL_EGRESS_ADD_TAG;
                }
        }
        if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
            mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-           mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds)) {
+           mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
+           mv88e6xxx_6320_family(ds)) {
                if (ds->dsa_port_mask & (1 << port))
                        reg |= PORT_CONTROL_FRAME_MODE_DSA;
                if (port == dsa_upstream_port(ds))
@@ -1473,11 +1439,11 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
        reg = 0;
        if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
            mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-           mv88e6xxx_6095_family(ds))
+           mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
                reg = PORT_CONTROL_2_MAP_DA;
 
        if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-           mv88e6xxx_6165_family(ds))
+           mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
                reg |= PORT_CONTROL_2_JUMBO_10240;
 
        if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
@@ -1514,7 +1480,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
                goto abort;
 
        if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+           mv88e6xxx_6320_family(ds)) {
                /* Do not limit the period of time that this port can
                 * be paused for by the remote end or the period of
                 * time that this port can pause the remote end.
@@ -1564,7 +1531,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
 
        if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
            mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-           mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) {
+           mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
+           mv88e6xxx_6320_family(ds)) {
                /* Rate Control: disable ingress rate limiting. */
                ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
                                           PORT_RATE_CONTROL, 0x0001);
@@ -1913,6 +1881,7 @@ int mv88e6xxx_setup_common(struct dsa_switch *ds)
 int mv88e6xxx_setup_global(struct dsa_switch *ds)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int ret;
        int i;
 
        /* Set the default address aging time to 5 minutes, and
@@ -1976,7 +1945,8 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds)
                          (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
 
        if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+           mv88e6xxx_6320_family(ds)) {
                /* Send all frames with destination addresses matching
                 * 01:80:c2:00:00:2x to the CPU port.
                 */
@@ -1995,7 +1965,8 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds)
 
        if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
            mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-           mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) {
+           mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
+           mv88e6xxx_6320_family(ds)) {
                /* Disable ingress rate limiting by resetting all
                 * ingress rate limit registers to their initial
                 * state.
@@ -2009,9 +1980,11 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds)
        REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);
 
        /* Wait for the flush to complete. */
-       _mv88e6xxx_stats_wait(ds);
+       mutex_lock(&ps->smi_mutex);
+       ret = _mv88e6xxx_stats_wait(ds);
+       mutex_unlock(&ps->smi_mutex);
 
-       return 0;
+       return ret;
 }
 
 int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
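
This change follows the locking convention stated elsewhere in the file ("Must be called with SMI lock held"): underscore-prefixed helpers expect the caller to hold ps->smi_mutex, and the wrapper now also propagates the wait's return value instead of silently returning 0. A sketch of the convention, with hypothetical names:

/* _xxx() variants assume ps->smi_mutex is held; wrappers take it. */
static int _stats_flush_wait(struct dsa_switch *ds);   /* caller holds lock */

static int stats_flush_wait(struct dsa_switch *ds)
{
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
        int ret;

        mutex_lock(&ps->smi_mutex);
        ret = _stats_flush_wait(ds);
        mutex_unlock(&ps->smi_mutex);

        return ret;   /* propagate errors instead of dropping them */
}
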
@@ -2162,6 +2135,132 @@ mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
        return ret;
 }
 
+#ifdef CONFIG_NET_DSA_HWMON
+
+static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int ret;
+       int val;
+
+       *temp = 0;
+
+       mutex_lock(&ps->smi_mutex);
+
+       ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
+       if (ret < 0)
+               goto error;
+
+       /* Enable temperature sensor */
+       ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+       if (ret < 0)
+               goto error;
+
+       ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
+       if (ret < 0)
+               goto error;
+
+       /* Wait for temperature to stabilize */
+       usleep_range(10000, 12000);
+
+       val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+       if (val < 0) {
+               ret = val;
+               goto error;
+       }
+
+       /* Disable temperature sensor */
+       ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
+       if (ret < 0)
+               goto error;
+
+       *temp = ((val & 0x1f) - 5) * 5;
+
+error:
+       _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
+       mutex_unlock(&ps->smi_mutex);
+       return ret;
+}
+
+static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
+{
+       int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+       int ret;
+
+       *temp = 0;
+
+       ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
+       if (ret < 0)
+               return ret;
+
+       *temp = (ret & 0xff) - 25;
+
+       return 0;
+}
+
+int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
+{
+       if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
+               return mv88e63xx_get_temp(ds, temp);
+
+       return mv88e61xx_get_temp(ds, temp);
+}
+
+int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
+{
+       int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+       int ret;
+
+       if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+               return -EOPNOTSUPP;
+
+       *temp = 0;
+
+       ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+       if (ret < 0)
+               return ret;
+
+       *temp = (((ret >> 8) & 0x1f) * 5) - 25;
+
+       return 0;
+}
+
+int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
+{
+       int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+       int ret;
+
+       if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+               return -EOPNOTSUPP;
+
+       ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+       if (ret < 0)
+               return ret;
+       temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
+       return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
+                                       (ret & 0xe0ff) | (temp << 8));
+}
+
+int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
+{
+       int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+       int ret;
+
+       if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+               return -EOPNOTSUPP;
+
+       *alarm = false;
+
+       ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+       if (ret < 0)
+               return ret;
+
+       *alarm = !!(ret & 0x40);
+
+       return 0;
+}
+#endif /* CONFIG_NET_DSA_HWMON */
+
 static int __init mv88e6xxx_init(void)
 {
 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
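
The 6320/6352 limit encoding round-trips: set_temp_limit() stores clamp(round(T / 5) + 5, 0, 0x1f) into bits 12:8 of the page-6 register, and get_temp_limit() decodes field * 5 - 25. A quick standalone check, assuming the kernel's DIV_ROUND_CLOSEST semantics for positive values:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

static int clamp_val(int v, int lo, int hi)
{
        return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
        int limit = 70;   /* desired limit in degrees C */
        int field = clamp_val(DIV_ROUND_CLOSEST(limit, 5) + 5, 0, 0x1f);

        printf("field %#x decodes back to %d C\n", field, field * 5 - 25);
        return 0;
}
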
index a650b2656de9f0c4b8181bc3d3b3a4c533b4aa7a..78e37226a37d2d90fd8c8924d484c057a7700d4a 100644 (file)
 #define PORT_SWITCH_ID_6182    0x1a60
 #define PORT_SWITCH_ID_6185    0x1a70
 #define PORT_SWITCH_ID_6240    0x2400
-#define PORT_SWITCH_ID_6320    0x1250
+#define PORT_SWITCH_ID_6320    0x1150
+#define PORT_SWITCH_ID_6320_A1 0x1151
+#define PORT_SWITCH_ID_6320_A2 0x1152
+#define PORT_SWITCH_ID_6321    0x3100
+#define PORT_SWITCH_ID_6321_A1 0x3101
+#define PORT_SWITCH_ID_6321_A2 0x3102
 #define PORT_SWITCH_ID_6350    0x3710
 #define PORT_SWITCH_ID_6351    0x3750
 #define PORT_SWITCH_ID_6352    0x3520
@@ -389,7 +394,10 @@ int mv88e6xxx_get_sset_count_basic(struct dsa_switch *ds);
 int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port);
 void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
                        struct ethtool_regs *regs, void *_p);
-int  mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
+int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
+int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp);
+int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp);
+int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm);
 int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds);
 int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds);
 int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum);
@@ -410,6 +418,7 @@ int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
 int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg);
 int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
                             int reg, int val);
+
 extern struct dsa_switch_driver mv88e6131_switch_driver;
 extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
 extern struct dsa_switch_driver mv88e6352_switch_driver;
index f3bb1784066baf51c97f464a1677aac50f781cdb..05aa7597dab9b712de711f61c2d14e8bd0e992e6 100644 (file)
@@ -167,6 +167,7 @@ source "drivers/net/ethernet/sgi/Kconfig"
 source "drivers/net/ethernet/smsc/Kconfig"
 source "drivers/net/ethernet/stmicro/Kconfig"
 source "drivers/net/ethernet/sun/Kconfig"
+source "drivers/net/ethernet/synopsys/Kconfig"
 source "drivers/net/ethernet/tehuti/Kconfig"
 source "drivers/net/ethernet/ti/Kconfig"
 source "drivers/net/ethernet/tile/Kconfig"
index c51014b0464f604c0f41118d8dff625e4993aa12..f42177b1172313a3444e17a420cbbdd1f1d74942 100644 (file)
@@ -77,6 +77,7 @@ obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
 obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
 obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
 obj-$(CONFIG_NET_VENDOR_SUN) += sun/
+obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
 obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
 obj-$(CONFIG_NET_VENDOR_TI) += ti/
 obj-$(CONFIG_TILE_NET) += tile/
index 4566cdf0bc398e977b310729a3ab129a190bc9b1..b9a5a97ed4dd4abc77c488cabdd0a47e99f0693f 100644 (file)
@@ -933,6 +933,21 @@ static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bcm_sysport_poll_controller(struct net_device *dev)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+       disable_irq(priv->irq0);
+       bcm_sysport_rx_isr(priv->irq0, priv);
+       enable_irq(priv->irq0);
+
+       disable_irq(priv->irq1);
+       bcm_sysport_tx_isr(priv->irq1, priv);
+       enable_irq(priv->irq1);
+}
+#endif
+
 static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
                                              struct net_device *dev)
 {
@@ -1723,6 +1738,9 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
        .ndo_set_features       = bcm_sysport_set_features,
        .ndo_set_rx_mode        = bcm_sysport_set_rx_mode,
        .ndo_set_mac_address    = bcm_sysport_change_mac,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = bcm_sysport_poll_controller,
+#endif
 };
 
 #define REV_FMT        "v%2x.%02x"
index cd4ae76bbff2f8acda89154e65cf699e141553e5..5762c485ea06e75305a88784e0b34816680e164e 100644 (file)
@@ -1,6 +1,8 @@
-/* bnx2x.h: Broadcom Everest network driver.
+/* bnx2x.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -30,7 +32,7 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.710.51-0"
+#define DRV_MODULE_VERSION      "1.712.30-0"
 #define DRV_MODULE_RELDATE      "2014/02/10"
 #define BNX2X_BC_VER            0x040200
 
@@ -1226,6 +1228,10 @@ struct bnx2x_slowpath {
                struct eth_classify_rules_ramrod_data   e2;
        } mac_rdata;
 
+       union {
+               struct eth_classify_rules_ramrod_data   e2;
+       } vlan_rdata;
+
        union {
                struct tstorm_eth_mac_filter_config     e1x;
                struct eth_filter_rules_ramrod_data     e2;
@@ -1408,6 +1414,9 @@ struct bnx2x_sp_objs {
 
        /* Queue State object */
        struct bnx2x_queue_sp_obj q_obj;
+
+       /* VLANs object */
+       struct bnx2x_vlan_mac_obj vlan_obj;
 };
 
 struct bnx2x_fp_stats {
@@ -1422,6 +1431,13 @@ enum {
        SUB_MF_MODE_UNKNOWN = 0,
        SUB_MF_MODE_UFP,
        SUB_MF_MODE_NPAR1_DOT_5,
+       SUB_MF_MODE_BD,
+};
+
+struct bnx2x_vlan_entry {
+       struct list_head link;
+       u16 vid;
+       bool hw;
 };
 
 struct bnx2x {
@@ -1636,6 +1652,8 @@ struct bnx2x {
        u8                      mf_sub_mode;
 #define IS_MF_UFP(bp)          (IS_MF_SD(bp) && \
                                 bp->mf_sub_mode == SUB_MF_MODE_UFP)
+#define IS_MF_BD(bp)           (IS_MF_SD(bp) && \
+                                bp->mf_sub_mode == SUB_MF_MODE_BD)
 
        u8                      wol;
 
@@ -1860,8 +1878,6 @@ struct bnx2x {
        int                                     dcb_version;
 
        /* CAM credit pools */
-
-       /* used only in sriov */
        struct bnx2x_credit_pool_obj            vlans_pool;
 
        struct bnx2x_credit_pool_obj            macs_pool;
@@ -1924,6 +1940,11 @@ struct bnx2x {
        u16 rx_filter;
 
        struct bnx2x_link_report_data           vf_link_vars;
+       struct list_head vlan_reg;
+       u16 vlan_cnt;
+       u16 vlan_credit;
+       u16 vxlan_dst_port;
+       bool accept_any_vlan;
 };
 
 /* Tx queues may be less or equal to Rx queues */
@@ -1951,23 +1972,14 @@ extern int num_queues;
 #define RSS_IPV6_TCP_CAP_MASK                                          \
        TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
 
-/* func init flags */
-#define FUNC_FLG_RSS           0x0001
-#define FUNC_FLG_STATS         0x0002
-/* removed  FUNC_FLG_UNMATCHED 0x0004 */
-#define FUNC_FLG_TPA           0x0008
-#define FUNC_FLG_SPQ           0x0010
-#define FUNC_FLG_LEADING       0x0020  /* PF only */
-#define FUNC_FLG_LEADING_STATS 0x0040
 struct bnx2x_func_init_params {
        /* dma */
-       dma_addr_t      fw_stat_map;    /* valid iff FUNC_FLG_STATS */
-       dma_addr_t      spq_map;        /* valid iff FUNC_FLG_SPQ */
+       bool            spq_active;
+       dma_addr_t      spq_map;
+       u16             spq_prod;
 
-       u16             func_flgs;
        u16             func_id;        /* abs fid */
        u16             pf_id;
-       u16             spq_prod;       /* valid iff FUNC_FLG_SPQ */
 };
 
 #define for_each_cnic_queue(bp, var) \
@@ -2077,6 +2089,11 @@ struct bnx2x_func_init_params {
 int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
                      struct bnx2x_vlan_mac_obj *obj, bool set,
                      int mac_type, unsigned long *ramrod_flags);
+
+int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+                      struct bnx2x_vlan_mac_obj *obj, bool set,
+                      unsigned long *ramrod_flags);
+
 /**
  * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
  *
@@ -2481,6 +2498,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 #define VF_ACQUIRE_THRESH              3
 #define VF_ACQUIRE_MAC_FILTERS         1
 #define VF_ACQUIRE_MC_FILTERS          10
+#define VF_ACQUIRE_VLAN_FILTERS                2 /* VLAN0 + 'real' VLAN */
 
 #define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
                            (!((me_reg) & ME_REG_VF_ERR)))
@@ -2577,6 +2595,8 @@ void bnx2x_set_local_cmng(struct bnx2x *bp);
 
 void bnx2x_update_mng_version(struct bnx2x *bp);
 
+void bnx2x_update_mfw_dump(struct bnx2x *bp);
+
 #define MCPR_SCRATCH_BASE(bp) \
        (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
 
@@ -2589,4 +2609,9 @@ void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb);
 #define BNX2X_MAX_PHC_DRIFT 31000000
 #define BNX2X_PTP_TX_TIMEOUT
 
+/* Re-configure all previously configured vlan filters.
+ * Meant for implicit re-load flows.
+ */
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
+
 #endif /* bnx2x.h */
index a90d7364334f9dfa3687dc813e068508a342861c..1637de6caf46b5213e3148d5d3e44309e1f1483d 100644 (file)
@@ -1,6 +1,8 @@
-/* bnx2x_cmn.c: Broadcom Everest network driver.
+/* bnx2x_cmn.c: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -2103,9 +2105,14 @@ int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
                if (rss_obj->udp_rss_v6)
                        __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
 
-               if (!CHIP_IS_E1x(bp))
+               if (!CHIP_IS_E1x(bp)) {
+                       /* valid only for TUNN_MODE_VXLAN tunnel mode */
+                       __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
+                       __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
+
                        /* valid only for TUNN_MODE_GRE tunnel mode */
-                       __set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
+                       __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
+               }
        } else {
                __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
        }
@@ -2510,6 +2517,20 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
                fp->mode = TPA_MODE_DISABLED;
 }
 
+void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
+{
+       u32 cur;
+
+       if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
+               return;
+
+       cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
+       DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
+          cur, state);
+
+       SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
+}
+
 int bnx2x_load_cnic(struct bnx2x *bp)
 {
        int i, rc, port = BP_PORT(bp);
@@ -2827,6 +2848,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
        /* Start fast path */
 
+       /* Re-configure vlan filters */
+       rc = bnx2x_vlan_reconfigure_vid(bp);
+       if (rc)
+               LOAD_ERROR_EXIT(bp, load_error3);
+
        /* Initialize Rx filter. */
        bnx2x_set_rx_mode_inner(bp);
 
@@ -2873,6 +2899,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                /* mark driver is loaded in shmem2 */
                u32 val;
                val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+               val &= ~DRV_FLAGS_MTU_MASK;
+               val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
                SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
                          val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
                          DRV_FLAGS_CAPABILITIES_LOADED_L2);
@@ -2885,10 +2913,17 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                return -EBUSY;
        }
 
+       /* Update driver data for On-Chip MFW dump. */
+       if (IS_PF(bp))
+               bnx2x_update_mfw_dump(bp);
+
        /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
        if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
                bnx2x_dcbx_init(bp, false);
 
+       if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+               bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
+
        DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
 
        return 0;
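
The MTU write a few lines up uses the DRV_FLAGS_MTU_MASK/DRV_FLAGS_MTU_SHIFT pair defined in the bnx2x_hsi.h hunk further down: a plain read-modify-write that packs the MTU into the upper 16 bits of the capabilities word. In isolation:

#include <stdio.h>
#include <stdint.h>

#define DRV_FLAGS_MTU_MASK  0xffff0000u
#define DRV_FLAGS_MTU_SHIFT 16

int main(void)
{
        uint32_t val = 0x00000006;            /* capability bits, low half */
        uint32_t mtu = 9000;

        val &= ~DRV_FLAGS_MTU_MASK;           /* clear the old MTU field */
        val |= mtu << DRV_FLAGS_MTU_SHIFT;

        printf("flags %#010x, mtu readback %u\n", val,
               (val & DRV_FLAGS_MTU_MASK) >> DRV_FLAGS_MTU_SHIFT);
        return 0;
}
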
@@ -2956,6 +2991,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 
        DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
 
+       if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+               bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
+
        /* mark driver is unloaded in shmem2 */
        if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
                u32 val;
@@ -3677,7 +3715,7 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
                pbd2->fw_ip_hdr_to_payload_w =
                        hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
                pbd_e2->data.tunnel_data.flags |=
-                       ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER;
+                       ETH_TUNNEL_DATA_IPV6_OUTER;
        }
 
        pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
@@ -4184,6 +4222,41 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
+void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
+{
+       int mfw_vn = BP_FW_MB_IDX(bp);
+       u32 tmp;
+
+       /* If the shmem shouldn't affect configuration, use an identity mapping */
+       if (!IS_MF_BD(bp)) {
+               int i;
+
+               for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
+                       c2s_map[i] = i;
+               *c2s_default = 0;
+
+               return;
+       }
+
+       tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
+       tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+       c2s_map[0] = tmp & 0xff;
+       c2s_map[1] = (tmp >> 8) & 0xff;
+       c2s_map[2] = (tmp >> 16) & 0xff;
+       c2s_map[3] = (tmp >> 24) & 0xff;
+
+       tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
+       tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+       c2s_map[4] = tmp & 0xff;
+       c2s_map[5] = (tmp >> 8) & 0xff;
+       c2s_map[6] = (tmp >> 16) & 0xff;
+       c2s_map[7] = (tmp >> 24) & 0xff;
+
+       tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
+       tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+       *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
+}
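
Each c2s_pcp_map word packs four one-byte PCP entries MSB-first (the shmem comments below note that 0xFF000000 holds PCP 0), so after be32_to_cpu() the first entry sits in the low byte. A userspace illustration, assuming a little-endian CPU where be32_to_cpu() amounts to a byte swap:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t raw = 0x00010203;               /* PCP0=0, PCP1=1, PCP2=2, PCP3=3 */
        uint32_t tmp = __builtin_bswap32(raw);   /* stands in for be32_to_cpu() */
        uint8_t c2s_map[4];
        int i;

        for (i = 0; i < 4; i++)
                c2s_map[i] = (tmp >> (8 * i)) & 0xff;

        printf("PCP 0..3 -> %u %u %u %u\n",
               c2s_map[0], c2s_map[1], c2s_map[2], c2s_map[3]);
        return 0;
}
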
+
 /**
  * bnx2x_setup_tc - routine to configure net_device for multi tc
  *
@@ -4194,8 +4267,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
  */
 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
 {
-       int cos, prio, count, offset;
        struct bnx2x *bp = netdev_priv(dev);
+       u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
+       int cos, prio, count, offset;
 
        /* setup tc must be called under rtnl lock */
        ASSERT_RTNL();
@@ -4219,12 +4293,16 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
                return -EINVAL;
        }
 
+       bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
+
        /* configure priority to traffic class mapping */
        for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
-               netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
+               int outer_prio = c2s_map[prio];
+
+               netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
                DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
                   "mapping priority %d to tc %d\n",
-                  prio, bp->prio_to_cos[prio]);
+                  outer_prio, bp->prio_to_cos[outer_prio]);
        }
 
        /* Use this configuration to differentiate tc0 from other COSes
@@ -4278,6 +4356,9 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
        if (netif_running(dev))
                rc = bnx2x_set_eth_mac(bp, true);
 
+       if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
+               SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
        return rc;
 }
 
@@ -4831,6 +4912,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
         */
        dev->mtu = new_mtu;
 
+       if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
+               SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
        return bnx2x_reload_if_running(dev);
 }
 
index 03b7404d5b9ba59c5470fe36ec0746d6b75f7eee..fa7c532012654eb05ad42fce20e92c6ab03cef32 100644 (file)
@@ -1,6 +1,8 @@
-/* bnx2x_cmn.h: Broadcom Everest network driver.
+/* bnx2x_cmn.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -620,6 +622,14 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features);
  */
 void bnx2x_tx_timeout(struct net_device *dev);
 
+/** bnx2x_get_c2s_mapping - read inner-to-outer vlan configuration
+ *
+ * @bp:                        driver handle
+ * @c2s_map:           should have BNX2X_MAX_PRIORITY entries for mapping
+ * @c2s_default:       entry for non-tagged configuration
+ */
+void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default);
+
 /*********************** Inlines **********************************/
 /*********************** Fast path ********************************/
 static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
@@ -931,14 +941,33 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
        start_params->mf_mode = bp->mf_mode;
        start_params->sd_vlan_tag = bp->mf_ov;
 
+       /* Configure Ethertype for BD mode */
+       if (IS_MF_BD(bp)) {
+               DP(NETIF_MSG_IFUP, "Configuring ethertype 0x88a8 for BD\n");
+               start_params->sd_vlan_eth_type = ETH_P_8021AD;
+               REG_WR(bp, PRS_REG_VLAN_TYPE_0, ETH_P_8021AD);
+               REG_WR(bp, PBF_REG_VLAN_TYPE_0, ETH_P_8021AD);
+               REG_WR(bp, NIG_REG_LLH_E1HOV_TYPE_1, ETH_P_8021AD);
+
+               bnx2x_get_c2s_mapping(bp, start_params->c2s_pri,
+                                     &start_params->c2s_pri_default);
+               start_params->c2s_pri_valid = 1;
+
+               DP(NETIF_MSG_IFUP,
+                  "Inner-to-Outer priority: %02x %02x %02x %02x %02x %02x %02x %02x [Default %02x]\n",
+                  start_params->c2s_pri[0], start_params->c2s_pri[1],
+                  start_params->c2s_pri[2], start_params->c2s_pri[3],
+                  start_params->c2s_pri[4], start_params->c2s_pri[5],
+                  start_params->c2s_pri[6], start_params->c2s_pri[7],
+                  start_params->c2s_pri_default);
+       }
+
        if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
                start_params->network_cos_mode = STATIC_COS;
        else /* CHIP_IS_E1X */
                start_params->network_cos_mode = FW_WRR;
 
-       start_params->tunnel_mode       = TUNN_MODE_GRE;
-       start_params->gre_tunnel_type   = IPGRE_TUNNEL;
-       start_params->inner_gre_rss_en  = 1;
+       start_params->inner_rss = 1;
 
        if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
                start_params->class_fail_ethtype = ETH_P_FIP;
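
BD mode makes the outer (service) tag use the 802.1ad ethertype, which is why the hunk above programs ETH_P_8021AD (0x88a8) into the parser, PBF and NIG registers. The constant is the standard S-tag TPID from the Linux UAPI headers:

#include <stdio.h>
#include <linux/if_ether.h>   /* ETH_P_8021AD (0x88A8), ETH_P_8021Q (0x8100) */

int main(void)
{
        printf("S-tag TPID %#06x, C-tag TPID %#06x\n",
               ETH_P_8021AD, ETH_P_8021Q);
        return 0;
}
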
@@ -1037,6 +1066,15 @@ static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
                           BNX2X_FILTER_MAC_PENDING,
                           &bp->sp_state, obj_type,
                           &bp->macs_pool);
+
+       if (!CHIP_IS_E1x(bp))
+               bnx2x_init_vlan_obj(bp, &bnx2x_sp_obj(bp, fp).vlan_obj,
+                                   fp->cl_id, fp->cid, BP_FUNC(bp),
+                                   bnx2x_sp(bp, vlan_rdata),
+                                   bnx2x_sp_mapping(bp, vlan_rdata),
+                                   BNX2X_FILTER_VLAN_PENDING,
+                                   &bp->sp_state, obj_type,
+                                   &bp->vlans_pool);
 }
 
 /**
@@ -1096,7 +1134,7 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
        bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
                                   bnx2x_get_path_func_num(bp));
 
-       bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1,
+       bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_FUNC(bp),
                                    bnx2x_get_path_func_num(bp));
 
        /* RSS configuration object */
@@ -1106,6 +1144,8 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
                                  bnx2x_sp_mapping(bp, rss_rdata),
                                  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
                                  BNX2X_OBJ_TYPE_RX);
+
+       bp->vlan_credit = PF_VLAN_CREDIT_E2(bp, bnx2x_get_path_func_num(bp));
 }
 
 static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
@@ -1339,4 +1379,11 @@ void bnx2x_squeeze_objects(struct bnx2x *bp);
 void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
                            u32 verbose);
 
+/**
+ * bnx2x_set_os_driver_state - write driver state for management FW usage
+ *
+ * @bp:                driver handle
+ * @state:     OS_DRIVER_STATE_* value reflecting current driver state
+ */
+void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state);
 #endif /* BNX2X_CMN_H */
index 6e4294ed1fc997e5545fcc48a24c92124129867c..7ccf6684e0a32d3b4a6caa78fa1fbff8dbbef460 100644 (file)
@@ -1,15 +1,17 @@
-/* bnx2x_dcb.c: Broadcom Everest network driver.
+/* bnx2x_dcb.c: QLogic Everest network driver.
  *
  * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -1850,6 +1852,8 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
                        if (bp->dcbx_port_params.ets.cos_params[cos].
                                                pri_bitmask & pri_bit)
                                        tt2cos[pri].cos = cos;
+
+               pfc_fw_cfg->dcb_outer_pri[pri]  = ttp[pri];
        }
 
        /* we never want the FW to add a 0 vlan tag */
index c6939ecb02c572fd41fa5d2c814ed5ad932c26e7..9a9517c0f703f756beb7df5522250f9b802c3ad1 100644 (file)
@@ -1,15 +1,17 @@
-/* bnx2x_dcb.h: Broadcom Everest network driver.
+/* bnx2x_dcb.h: QLogic Everest network driver.
  *
  * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
index 741aa130c19f4efb4df2e2b64b166f24bd4b6b3e..eccfa13b0f2d5c43132e581ac15c32f752903404 100644 (file)
@@ -1,15 +1,17 @@
-/* bnx2x_dump.h: Broadcom Everest network driver.
+/* bnx2x_dump.h: QLogic Everest network driver.
  *
  * Copyright (c) 2012-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  */
 
index 76b9052a961c517978494199d74398264583508c..6b2050a198df8ebd43fb29ec4176424491f29ac8 100644 (file)
@@ -1,6 +1,8 @@
-/* bnx2x_ethtool.c: Broadcom Everest network driver.
+/* bnx2x_ethtool.c: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1129,6 +1131,9 @@ static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
        } else
                bp->wol = 0;
 
+       if (SHMEM2_HAS(bp, curr_cfg))
+               SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
        return 0;
 }
 
@@ -3562,17 +3567,8 @@ static int bnx2x_get_ts_info(struct net_device *dev,
 
                info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
                                   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
-                                  (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-                                  (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
                                   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
-                                  (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-                                  (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
-                                  (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
-                                  (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-                                  (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-                                  (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
-                                  (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-                                  (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+                                  (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
 
                info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON);
 
index 7636e3c18771dced3a1d4bd3db0018cee4b4d7f3..226ab29f4cb6a4d10a9d41b501701bf953040c17 100644 (file)
@@ -1,6 +1,8 @@
-/* bnx2x_fw_defs.h: Broadcom Everest network driver.
+/* bnx2x_fw_defs.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #define MAX_COS_NUMBER 4
 #define MAX_TRAFFIC_TYPES 8
 #define MAX_PFC_PRIORITIES 8
-
+#define MAX_VLAN_PRIORITIES 8
        /* used by array traffic_type_to_priority[] to mark traffic type \
        that is not mapped to priority */
 #define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
index 8aafd9b5d6a2b107f67a8cce344b4bb479ccc85c..9e3b5a1e9f4f490bec6f74d1cb1f5c949289d67d 100644 (file)
@@ -1,6 +1,8 @@
 /* bnx2x_fw_file_hdr.h: FW binary file header structure.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 058bc73282201e8b9f897273c18ac18856eac38e..08a08fa49caad3fb8850b0f92f4285c43a667835 100644 (file)
@@ -1,6 +1,8 @@
-/* bnx2x_hsi.h: Broadcom Everest network driver.
+/* bnx2x_hsi.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -729,6 +731,7 @@ struct port_hw_cfg {                    /* port 0: 0x12c  port 1: 0x2bc */
                #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722       0x00000f00
                #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616      0x00001000
                #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84834      0x00001100
+               #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84858      0x00001200
                #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE       0x0000fd00
                #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN      0x0000ff00
 
@@ -786,6 +789,7 @@ struct port_hw_cfg {                    /* port 0: 0x12c  port 1: 0x2bc */
                #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722        0x00000f00
                #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616       0x00001000
                #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834       0x00001100
+               #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858       0x00001200
                #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC      0x0000fc00
                #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE        0x0000fd00
                #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN       0x0000ff00
@@ -864,6 +868,7 @@ struct shared_feat_cfg {             /* NVRAM Offset */
                #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4          0x00000200
                #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT  0x00000300
                #define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE      0x00000400
+               #define SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE        0x00000500
                #define SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE       0x00000600
                #define SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE  0x00000700
 
@@ -2064,6 +2069,26 @@ struct ncsi_oem_fcoe_features {
        #define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET          0
 };
 
+enum curr_cfg_method_e {
+       CURR_CFG_MET_NONE = 0,  /* default config */
+       CURR_CFG_MET_OS = 1,
+       CURR_CFG_MET_VENDOR_SPEC = 2, /* e.g. Option ROM, NPAR, O/S Cfg Utils */
+};
+
+struct mdump_driver_info {
+       u32 epoc;
+       u32 drv_ver;
+       u32 fw_ver;
+
+       u32 valid_dump;
+       #define FIRST_DUMP_VALID        (1 << 0)
+       #define SECOND_DUMP_VALID       (1 << 1)
+
+       u32 flags;
+       #define ENABLE_ALL_TRIGGERS     (0x7fffffff)
+       #define TRIGGER_MDUMP_ONCE      (1 << 31)
+};
+
 struct ncsi_oem_data {
        u32 driver_version[4];
        struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features;
@@ -2187,6 +2212,8 @@ struct shmem2_region {
 #define DRV_FLAGS_CAPABILITIES_LOADED_L2        0x00000002
 #define DRV_FLAGS_CAPABILITIES_LOADED_FCOE      0x00000004
 #define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI     0x00000008
+#define DRV_FLAGS_MTU_MASK                     0xffff0000
+#define DRV_FLAGS_MTU_SHIFT                    16
 
        u32 extended_dev_info_shared_cfg_size;
 
@@ -2251,6 +2278,7 @@ struct shmem2_region {
        u32 reserved4;                          /* Offset 0x150 */
        u32 link_attr_sync[PORT_MAX];           /* Offset 0x154 */
        #define LINK_ATTR_SYNC_KR2_ENABLE       0x00000001
+       #define LINK_ATTR_84858                 0x00000002
        #define LINK_SFP_EEPROM_COMP_CODE_MASK  0x0000ff00
        #define LINK_SFP_EEPROM_COMP_CODE_SHIFT          8
        #define LINK_SFP_EEPROM_COMP_CODE_SR    0x00001000
@@ -2268,6 +2296,74 @@ struct shmem2_region {
 
        /* We use indication for each PF (0..3) */
 #define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_))
+       union { /* For various OEMs */                  /* Offset 0x1a0 */
+               u8 storage_boot_prog[E2_FUNC_MAX];
+       #define STORAGE_BOOT_PROG_MASK                          0x000000FF
+       #define STORAGE_BOOT_PROG_NONE                          0x00000000
+       #define STORAGE_BOOT_PROG_ISCSI_IP_ACQUIRED             0x00000002
+       #define STORAGE_BOOT_PROG_FCOE_FABRIC_LOGIN_SUCCESS     0x00000002
+       #define STORAGE_BOOT_PROG_TARGET_FOUND                  0x00000004
+       #define STORAGE_BOOT_PROG_ISCSI_CHAP_SUCCESS            0x00000008
+       #define STORAGE_BOOT_PROG_FCOE_LUN_FOUND                0x00000008
+       #define STORAGE_BOOT_PROG_LOGGED_INTO_TGT               0x00000010
+       #define STORAGE_BOOT_PROG_IMG_DOWNLOADED                0x00000020
+       #define STORAGE_BOOT_PROG_OS_HANDOFF                    0x00000040
+       #define STORAGE_BOOT_PROG_COMPLETED                     0x00000080
+
+               u32 oem_i2c_data_addr;
+       };
+
+       /* 9 entries for the C2S PCP map for each inner VLAN PCP + 1 default */
+       /* For PCP values 0-3 use the map lower */
+       /* 0xFF000000 - PCP 0, 0x00FF0000 - PCP 1,
+        * 0x0000FF00 - PCP 2, 0x000000FF PCP 3
+        */
+       u32 c2s_pcp_map_lower[E2_FUNC_MAX];                     /* 0x1a4 */
+
+       /* For PCP values 4-7 use the map upper */
+       /* 0xFF000000 - PCP 4, 0x00FF0000 - PCP 5,
+        * 0x0000FF00 - PCP 6, 0x000000FF PCP 7
+        */
+       u32 c2s_pcp_map_upper[E2_FUNC_MAX];                     /* 0x1b4 */
+
+       /* For PCP default value get the MSB byte of the map default */
+       u32 c2s_pcp_map_default[E2_FUNC_MAX];                   /* 0x1c4 */
+
+       /* FC_NPIV table offset in NVRAM */
+       u32 fc_npiv_nvram_tbl_addr[PORT_MAX];                   /* 0x1d4 */
+
+       /* Shows last method that changed configuration of this device */
+       enum curr_cfg_method_e curr_cfg;                        /* 0x1dc */
+
+       /* Storm FW version, should be kept in the format 0xMMmmbbdd:
+        * MM - Major, mm - Minor, bb - Build, dd - Drop
+        */
+       u32 netproc_fw_ver;                                     /* 0x1e0 */
+
+       /* Option ROM SMASH CLP version */
+       u32 clp_ver;                                            /* 0x1e4 */
+
+       u32 pcie_bus_num;                                       /* 0x1e8 */
+
+       u32 sriov_switch_mode;                                  /* 0x1ec */
+       #define SRIOV_SWITCH_MODE_NONE          0x0
+       #define SRIOV_SWITCH_MODE_VEB           0x1
+       #define SRIOV_SWITCH_MODE_VEPA          0x2
+
+       u8  rsrv2[E2_FUNC_MAX];                                 /* 0x1f0 */
+
+       u32 img_inv_table_addr; /* Address to INV_TABLE_P */    /* 0x1f4 */
+
+       u32 mtu_size[E2_FUNC_MAX];                              /* 0x1f8 */
+
+       u32 os_driver_state[E2_FUNC_MAX];                       /* 0x208 */
+       #define OS_DRIVER_STATE_NOT_LOADED      0 /* not installed */
+       #define OS_DRIVER_STATE_LOADING         1 /* transition state */
+       #define OS_DRIVER_STATE_DISABLED        2 /* installed but disabled */
+       #define OS_DRIVER_STATE_ACTIVE          3 /* installed and active */
+
+       /* mini dump driver info */
+       struct mdump_driver_info drv_info;                      /* 0x218 */
 };
 
 
@@ -2898,8 +2994,8 @@ struct afex_stats {
 };
 
 #define BCM_5710_FW_MAJOR_VERSION                      7
-#define BCM_5710_FW_MINOR_VERSION                      10
-#define BCM_5710_FW_REVISION_VERSION           51
+#define BCM_5710_FW_MINOR_VERSION                      12
+#define BCM_5710_FW_REVISION_VERSION           30
 #define BCM_5710_FW_ENGINEERING_VERSION                0
 #define BCM_5710_FW_COMPILE_FLAGS                      1
 
@@ -3901,7 +3997,11 @@ struct eth_fast_path_rx_cqe {
        __le16 len_on_bd;
        struct parsing_flags pars_flags;
        union eth_sgl_or_raw_data sgl_or_raw_data;
-       __le32 reserved1[7];
+       u8 tunn_type;
+       u8 tunn_inner_hdrs_offset;
+       __le16 reserved1;
+       __le32 tunn_tenant_id;
+       __le32 padding[5];
        u32 marker;
 };
 
@@ -4012,8 +4112,8 @@ struct eth_tunnel_data {
        __le16 pseudo_csum;
        u8 ip_hdr_start_inner_w;
        u8 flags;
-#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0)
-#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0
+#define ETH_TUNNEL_DATA_IPV6_OUTER (0x1<<0)
+#define ETH_TUNNEL_DATA_IPV6_OUTER_SHIFT 0
 #define ETH_TUNNEL_DATA_RESERVED (0x7F<<1)
 #define ETH_TUNNEL_DATA_RESERVED_SHIFT 1
 };
@@ -4120,16 +4220,12 @@ struct eth_rss_update_ramrod_data {
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 6
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY (0x1<<7)
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY_SHIFT 7
-#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<8)
-#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 8
-#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY (0x1<<9)
-#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY_SHIFT 9
-#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY (0x1<<10)
-#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY_SHIFT 10
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<11)
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 11
-#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0xF<<12)
-#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 12
+#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY (0x1<<8)
+#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY_SHIFT 8
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<9)
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 9
+#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0x3F<<10)
+#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 10
        u8 rss_result_mask;
        u8 reserved3;
        __le16 reserved4;
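For illustration, a hedged sketch of how the consolidated inner-headers capability might be requested with the mask definitions above; the capability variable and its byte-order handling are assumptions, not part of this hunk:

/* Sketch only: request RSS over tunnel inner headers plus an RSS key
 * update in one ramrod. 'caps' stands in for the ramrod's capability
 * word, treated here in CPU byte order for simplicity.
 */
u16 caps = 0;

caps |= ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY;
caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;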
@@ -4314,6 +4410,18 @@ enum eth_tunnel_non_lso_csum_location {
        MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION
 };
 
+enum eth_tunn_type {
+       TUNN_TYPE_NONE,
+       TUNN_TYPE_VXLAN,
+       TUNN_TYPE_L2_GRE,
+       TUNN_TYPE_IPV4_GRE,
+       TUNN_TYPE_IPV6_GRE,
+       TUNN_TYPE_L2_GENEVE,
+       TUNN_TYPE_IPV4_GENEVE,
+       TUNN_TYPE_IPV6_GENEVE,
+       MAX_ETH_TUNN_TYPE
+};
+
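A short sketch of how the new fast-path CQE tunnel metadata and enum eth_tunn_type might be consumed on receive; the function is hypothetical and assumes 'data' points at the start of the outer frame:

/* Illustrative only: locate the inner headers of a tunnelled frame
 * using the tunn_type / tunn_inner_hdrs_offset fields added to
 * struct eth_fast_path_rx_cqe above.
 */
static const u8 *rx_inner_hdrs(const struct eth_fast_path_rx_cqe *cqe,
			       const u8 *data)
{
	if (cqe->tunn_type == TUNN_TYPE_NONE)
		return data;			/* no encapsulation */
	return data + cqe->tunn_inner_hdrs_offset;
}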
 /*
  * Tx regular BD structure
  */
@@ -4758,6 +4866,9 @@ struct afex_vif_list_ramrod_data {
        __le16 reserved1;
 };
 
+struct c2s_pri_trans_table_entry {
+       u8 val[MAX_VLAN_PRIORITIES];
+};
 
 /*
  * cfc delete event data
@@ -5246,6 +5357,7 @@ struct flow_control_configuration {
        u8 dont_add_pri_0_en;
        u8 reserved1;
        __le32 reserved2;
+       u8 dcb_outer_pri[MAX_TRAFFIC_TYPES];
 };
 
 
@@ -5260,18 +5372,25 @@ struct function_start_data {
        u8 path_id;
        u8 network_cos_mode;
        u8 dmae_cmd_id;
-       u8 tunnel_mode;
-       u8 gre_tunnel_type;
-       u8 tunn_clss_en;
-       u8 inner_gre_rss_en;
-       u8 sd_accept_mf_clss_fail;
+       u8 no_added_tags;
+       __le16 reserved0;
+       __le32 reserved1;
+       u8 inner_clss_vxlan;
+       u8 inner_clss_l2gre;
+       u8 inner_clss_l2geneve;
+       u8 inner_rss;
        __le16 vxlan_dst_port;
+       __le16 geneve_dst_port;
+       u8 sd_accept_mf_clss_fail;
+       u8 sd_accept_mf_clss_fail_match_ethtype;
        __le16 sd_accept_mf_clss_fail_ethtype;
        __le16 sd_vlan_eth_type;
        u8 sd_vlan_force_pri_flg;
        u8 sd_vlan_force_pri_val;
-       u8 sd_accept_mf_clss_fail_match_ethtype;
-       u8 no_added_tags;
+       u8 c2s_pri_tt_valid;
+       u8 c2s_pri_default;
+       u8 reserved2[6];
+       struct c2s_pri_trans_table_entry c2s_pri_trans_table;
 };
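As an example of the new client-to-server priority fields, a sketch (with hypothetical values) that programs an identity translation table with priority 0 as the default:

/* Sketch only: fill the c2s priority translation added to
 * struct function_start_data above.
 */
static void fill_c2s_pri(struct function_start_data *start)
{
	int i;

	for (i = 0; i < MAX_VLAN_PRIORITIES; i++)
		start->c2s_pri_trans_table.val[i] = i;	/* identity map */
	start->c2s_pri_default = 0;
	start->c2s_pri_tt_valid = 1;			/* mark table valid */
}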
 
 struct function_update_data {
@@ -5289,11 +5408,12 @@ struct function_update_data {
        u8 tx_switch_suspend;
        u8 echo;
        u8 update_tunn_cfg_flg;
-       u8 tunnel_mode;
-       u8 gre_tunnel_type;
-       u8 tunn_clss_en;
-       u8 inner_gre_rss_en;
+       u8 inner_clss_vxlan;
+       u8 inner_clss_l2gre;
+       u8 inner_clss_l2geneve;
+       u8 inner_rss;
        __le16 vxlan_dst_port;
+       __le16 geneve_dst_port;
        u8 sd_vlan_force_pri_change_flg;
        u8 sd_vlan_force_pri_flg;
        u8 sd_vlan_force_pri_val;
@@ -5302,6 +5422,8 @@ struct function_update_data {
        u8 reserved1;
        __le16 sd_vlan_tag;
        __le16 sd_vlan_eth_type;
+       __le16 reserved0;
+       __le32 reserved2;
 };
 
 /*
@@ -5330,15 +5452,6 @@ struct fw_version {
 #define __FW_VERSION_RESERVED_SHIFT 4
 };
 
-
-/* GRE Tunnel Mode */
-enum gre_tunnel_type {
-       NVGRE_TUNNEL,
-       L2GRE_TUNNEL,
-       IPGRE_TUNNEL,
-       MAX_GRE_TUNNEL_TYPE
-};
-
 /*
  * Dynamic Host-Coalescing - Driver(host) counters
  */
index d6e1975b7b691ab51ab536a7c9ec413601f24b6c..46ee2c01f4c5167209c23f3e1440b82630327cea 100644 (file)
@@ -1,7 +1,9 @@
-/* bnx2x_init.h: Broadcom Everest network driver.
+/* bnx2x_init.h: QLogic Everest network driver.
  *               Structures and macros needed during the initialization.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 5669ed2e87d0039ab02c3bc70636359e49c811e6..1835d2e451c0139e272e400774b0d6d19487df98 100644 (file)
@@ -1,8 +1,10 @@
-/* bnx2x_init_ops.h: Broadcom Everest network driver.
+/* bnx2x_init_ops.h: QLogic Everest network driver.
  *               Static functions needed during the initialization.
  *               This file is "included" in bnx2x_main.c.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index a0b03c27e0a302c08fd1a78c5dbd2dd7606ae16a..d946bba43726f94b0d8a62973978a19d07959390 100644 (file)
@@ -1,13 +1,15 @@
 /* Copyright 2008-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Written by Yaniv Rosner
@@ -9652,6 +9654,13 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
 /******************************************************************/
 /*             BCM8481/BCM84823/BCM84833 PHY SECTION             */
 /******************************************************************/
+static int bnx2x_is_8483x_8485x(struct bnx2x_phy *phy)
+{
+       return ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+               (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) ||
+               (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858));
+}
+
 static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
                                            struct bnx2x *bp,
                                            u8 port)
@@ -9666,8 +9675,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
        };
        u16 fw_ver1;
 
-       if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-           (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+       if (bnx2x_is_8483x_8485x(phy)) {
                bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
                bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff,
                                phy->ver_addr);
@@ -9749,8 +9757,7 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
                bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
                                 reg_set[i].val);
 
-       if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-           (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
+       if (bnx2x_is_8483x_8485x(phy))
                offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1;
        else
                offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
@@ -9768,8 +9775,7 @@ static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
        struct bnx2x *bp = params->bp;
        switch (action) {
        case PHY_INIT:
-               if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
-                   (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+               if (!bnx2x_is_8483x_8485x(phy)) {
                        /* Save spirom version */
                        bnx2x_save_848xx_spirom_version(phy, bp, params->port);
                }
@@ -9901,8 +9907,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
        /* Always write this if this is not 84833/4.
         * For 84833/4, write it only when it's a forced speed.
         */
-       if (((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
-            (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) ||
+       if (!bnx2x_is_8483x_8485x(phy) ||
            ((autoneg_val & (1<<12)) == 0))
                bnx2x_cl45_write(bp, phy,
                         MDIO_AN_DEVAD,
@@ -9949,8 +9954,86 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
        return bnx2x_848xx_cmn_config_init(phy, params, vars);
 }
 
-#define PHY84833_CMDHDLR_WAIT 300
-#define PHY84833_CMDHDLR_MAX_ARGS 5
+#define PHY848xx_CMDHDLR_WAIT 300
+#define PHY848xx_CMDHDLR_MAX_ARGS 5
+
+static int bnx2x_84858_cmd_hdlr(struct bnx2x_phy *phy,
+                               struct link_params *params,
+                               u16 fw_cmd,
+                               u16 cmd_args[], int argc)
+{
+       int idx;
+       u16 val;
+       struct bnx2x *bp = params->bp;
+
+       /* Step 1: Poll the STATUS register to see whether the previous command
+        * is in progress or the system is busy (CMD_IN_PROGRESS or
+        * SYSTEM_BUSY). If the previous command is in progress or the system
+        * is busy, poll again until the previous command finishes execution
+        * and the system is available to take a command
+        */
+
+       for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+               bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+                               MDIO_848xx_CMD_HDLR_STATUS, &val);
+               if ((val != PHY84858_STATUS_CMD_IN_PROGRESS) &&
+                   (val != PHY84858_STATUS_CMD_SYSTEM_BUSY))
+                       break;
+               usleep_range(1000, 2000);
+       }
+       if (idx >= PHY848xx_CMDHDLR_WAIT) {
+               DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
+               return -EINVAL;
+       }
+
+       /* Step2: If any parameters are required for the function, write them
+        * to the required DATA registers
+        */
+
+       for (idx = 0; idx < argc; idx++) {
+               bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+                                MDIO_848xx_CMD_HDLR_DATA1 + idx,
+                                cmd_args[idx]);
+       }
+
+       /* Step3: When the firmware is ready for commands, write the 'Command
+        * code' to the CMD register
+        */
+       bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+                        MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
+
+       /* Step4: Once the command has been written, poll the STATUS register
+        * to check whether the command has completed (CMD_COMPLETE_PASS or
+        * CMD_COMPLETE_ERROR).
+        */
+
+       for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+               bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+                               MDIO_848xx_CMD_HDLR_STATUS, &val);
+               if ((val == PHY84858_STATUS_CMD_COMPLETE_PASS) ||
+                   (val == PHY84858_STATUS_CMD_COMPLETE_ERROR))
+                       break;
+               usleep_range(1000, 2000);
+       }
+       if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
+           (val == PHY84858_STATUS_CMD_COMPLETE_ERROR)) {
+               DP(NETIF_MSG_LINK, "FW cmd failed.\n");
+               return -EINVAL;
+       }
+       /* Step5: Once the command has completed, read the specified DATA
+        * registers for any results the command returned, if applicable
+        */
+
+       /* Gather returning data */
+       for (idx = 0; idx < argc; idx++) {
+               bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+                               MDIO_848xx_CMD_HDLR_DATA1 + idx,
+                               &cmd_args[idx]);
+       }
+
+       return 0;
+}
+
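To illustrate the five-step mailbox sequence commented above, a hedged usage sketch that queries the EEE mode through the 84858 handler; which DATA register carries the result is an assumption:

/* Illustration only: one round trip through the 84858 command handler. */
static int example_get_eee_mode(struct bnx2x_phy *phy,
				struct link_params *params, u16 *mode)
{
	u16 data[PHY848xx_CMDHDLR_MAX_ARGS] = {0};
	int rc;

	rc = bnx2x_84858_cmd_hdlr(phy, params, PHY848xx_CMD_GET_EEE_MODE,
				  data, PHY848xx_CMDHDLR_MAX_ARGS);
	if (!rc)
		*mode = data[0];	/* assumed: result in first DATA reg */
	return rc;
}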
 static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
                                struct link_params *params, u16 fw_cmd,
                                u16 cmd_args[], int argc)
@@ -9960,16 +10043,16 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
        struct bnx2x *bp = params->bp;
        /* Write CMD_OPEN_OVERRIDE to STATUS reg */
        bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                       MDIO_84833_CMD_HDLR_STATUS,
+                       MDIO_848xx_CMD_HDLR_STATUS,
                        PHY84833_STATUS_CMD_OPEN_OVERRIDE);
-       for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
+       for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
                bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-                               MDIO_84833_CMD_HDLR_STATUS, &val);
+                               MDIO_848xx_CMD_HDLR_STATUS, &val);
                if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
                        break;
                usleep_range(1000, 2000);
        }
-       if (idx >= PHY84833_CMDHDLR_WAIT) {
+       if (idx >= PHY848xx_CMDHDLR_WAIT) {
                DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
                return -EINVAL;
        }
@@ -9977,42 +10060,62 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
        /* Prepare argument(s) and issue command */
        for (idx = 0; idx < argc; idx++) {
                bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                               MDIO_84833_CMD_HDLR_DATA1 + idx,
+                               MDIO_848xx_CMD_HDLR_DATA1 + idx,
                                cmd_args[idx]);
        }
        bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                       MDIO_84833_CMD_HDLR_COMMAND, fw_cmd);
-       for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
+                       MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
+       for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
                bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-                               MDIO_84833_CMD_HDLR_STATUS, &val);
+                               MDIO_848xx_CMD_HDLR_STATUS, &val);
                if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
-                       (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
+                   (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
                        break;
                usleep_range(1000, 2000);
        }
-       if ((idx >= PHY84833_CMDHDLR_WAIT) ||
-               (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
+       if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
+           (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
                DP(NETIF_MSG_LINK, "FW cmd failed.\n");
                return -EINVAL;
        }
        /* Gather returning data */
        for (idx = 0; idx < argc; idx++) {
                bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-                               MDIO_84833_CMD_HDLR_DATA1 + idx,
+                               MDIO_848xx_CMD_HDLR_DATA1 + idx,
                                &cmd_args[idx]);
        }
        bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                       MDIO_84833_CMD_HDLR_STATUS,
+                       MDIO_848xx_CMD_HDLR_STATUS,
                        PHY84833_STATUS_CMD_CLEAR_COMPLETE);
        return 0;
 }
 
-static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
-                                  struct link_params *params,
-                                  struct link_vars *vars)
+static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy,
+                               struct link_params *params,
+                               u16 fw_cmd,
+                               u16 cmd_args[], int argc)
+{
+       struct bnx2x *bp = params->bp;
+
+       if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) ||
+           (REG_RD(bp, params->shmem2_base +
+                   offsetof(struct shmem2_region,
+                            link_attr_sync[params->port])) &
+            LINK_ATTR_84858)) {
+               return bnx2x_84858_cmd_hdlr(phy, params, fw_cmd, cmd_args,
+                                           argc);
+       } else {
+               return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args,
+                                           argc);
+       }
+}
+
+static int bnx2x_848xx_pair_swap_cfg(struct bnx2x_phy *phy,
+                                    struct link_params *params,
+                                    struct link_vars *vars)
 {
        u32 pair_swap;
-       u16 data[PHY84833_CMDHDLR_MAX_ARGS];
+       u16 data[PHY848xx_CMDHDLR_MAX_ARGS];
        int status;
        struct bnx2x *bp = params->bp;
 
@@ -10028,8 +10131,9 @@ static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
        /* Only the second argument is used for this command */
        data[1] = (u16)pair_swap;
 
-       status = bnx2x_84833_cmd_hdlr(phy, params,
-               PHY84833_CMD_SET_PAIR_SWAP, data, PHY84833_CMDHDLR_MAX_ARGS);
+       status = bnx2x_848xx_cmd_hdlr(phy, params,
+                                     PHY848xx_CMD_SET_PAIR_SWAP, data,
+                                     PHY848xx_CMDHDLR_MAX_ARGS);
        if (status == 0)
                DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
 
@@ -10118,8 +10222,8 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
        DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
 
        /* Prevent PHY from working in EEE and advertising it */
-       rc = bnx2x_84833_cmd_hdlr(phy, params,
-               PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+       rc = bnx2x_848xx_cmd_hdlr(phy, params,
+                                 PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1);
        if (rc) {
                DP(NETIF_MSG_LINK, "EEE disable failed.\n");
                return rc;
@@ -10136,8 +10240,8 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
        struct bnx2x *bp = params->bp;
        u16 cmd_args = 1;
 
-       rc = bnx2x_84833_cmd_hdlr(phy, params,
-               PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+       rc = bnx2x_848xx_cmd_hdlr(phy, params,
+                                 PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1);
        if (rc) {
                DP(NETIF_MSG_LINK, "EEE enable failed.\n");
                return rc;
@@ -10155,7 +10259,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
        u8 port, initialize = 1;
        u16 val;
        u32 actual_phy_selection;
-       u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
+       u16 cmd_args[PHY848xx_CMDHDLR_MAX_ARGS];
        int rc = 0;
 
        usleep_range(1000, 2000);
@@ -10180,8 +10284,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 
        /* Wait for GPHY to come out of reset */
        msleep(50);
-       if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
-           (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+       if (!bnx2x_is_8483x_8485x(phy)) {
                /* BCM84823 requires that XGXS links up first @ 10G for normal
                 * behavior.
                 */
@@ -10192,7 +10295,19 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
                vars->line_speed = temp;
        }
+       /* Check if this is actually BCM84858 */
+       if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
+               u16 hw_rev;
 
+               bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+                               MDIO_AN_REG_848xx_ID_MSB, &hw_rev);
+               if (hw_rev == BCM84858_PHY_ID) {
+                       params->link_attr_sync |= LINK_ATTR_84858;
+                       bnx2x_update_link_attr(params, params->link_attr_sync);
+               }
+       }
+
+       /* Set dual-media configuration according to the multi-phy configuration */
        bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
                        MDIO_CTL_REG_84823_MEDIA, &val);
        val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
@@ -10237,18 +10352,17 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
        DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
                   params->multi_phy_config, val);
 
-       if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-           (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
-               bnx2x_84833_pair_swap_cfg(phy, params, vars);
+       if (bnx2x_is_8483x_8485x(phy)) {
+               bnx2x_848xx_pair_swap_cfg(phy, params, vars);
 
                /* Keep AutogrEEEn disabled. */
                cmd_args[0] = 0x0;
                cmd_args[1] = 0x0;
                cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
                cmd_args[3] = PHY84833_CONSTANT_LATENCY;
-               rc = bnx2x_84833_cmd_hdlr(phy, params,
-                       PHY84833_CMD_SET_EEE_MODE, cmd_args,
-                       PHY84833_CMDHDLR_MAX_ARGS);
+               rc = bnx2x_848xx_cmd_hdlr(phy, params,
+                                         PHY848xx_CMD_SET_EEE_MODE, cmd_args,
+                                         PHY848xx_CMDHDLR_MAX_ARGS);
                if (rc)
                        DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
        }
@@ -10302,8 +10416,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
        }
 
-       if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-           (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+       if (bnx2x_is_8483x_8485x(phy)) {
                /* Bring PHY out of super isolate mode as the final step. */
                bnx2x_cl45_read_and_write(bp, phy,
                                          MDIO_CTL_DEVAD,
@@ -10435,8 +10548,7 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
                                LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
 
                /* Determine if EEE was negotiated */
-               if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-                   (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
+               if (bnx2x_is_8483x_8485x(phy))
                        bnx2x_eee_an_resolve(phy, params, vars);
        }
 
@@ -11842,6 +11954,40 @@ static const struct bnx2x_phy phy_84834 = {
        .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
 };
 
+static const struct bnx2x_phy phy_84858 = {
+       .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858,
+       .addr           = 0xff,
+       .def_md_devad   = 0,
+       .flags          = FLAGS_FAN_FAILURE_DET_REQ |
+                           FLAGS_REARM_LATCH_SIGNAL,
+       .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+       .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+       .mdio_ctrl      = 0,
+       .supported      = (SUPPORTED_100baseT_Half |
+                          SUPPORTED_100baseT_Full |
+                          SUPPORTED_1000baseT_Full |
+                          SUPPORTED_10000baseT_Full |
+                          SUPPORTED_TP |
+                          SUPPORTED_Autoneg |
+                          SUPPORTED_Pause |
+                          SUPPORTED_Asym_Pause),
+       .media_type     = ETH_PHY_BASE_T,
+       .ver_addr       = 0,
+       .req_flow_ctrl  = 0,
+       .req_line_speed = 0,
+       .speed_cap_mask = 0,
+       .req_duplex     = 0,
+       .rsrv           = 0,
+       .config_init    = (config_init_t)bnx2x_848x3_config_init,
+       .read_status    = (read_status_t)bnx2x_848xx_read_status,
+       .link_reset     = (link_reset_t)bnx2x_848x3_link_reset,
+       .config_loopback = (config_loopback_t)NULL,
+       .format_fw_ver  = (format_fw_ver_t)bnx2x_848xx_format_ver,
+       .hw_reset       = (hw_reset_t)bnx2x_84833_hw_reset_phy,
+       .set_link_led   = (set_link_led_t)bnx2x_848xx_set_link_led,
+       .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+};
+
 static const struct bnx2x_phy phy_54618se = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE,
        .addr           = 0xff,
@@ -12128,6 +12274,9 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
                *phy = phy_84834;
                break;
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858:
+               *phy = phy_84858;
+               break;
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
                *phy = phy_54618se;
@@ -12184,9 +12333,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
        }
        phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
 
-       if (((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-            (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) &&
-           (phy->ver_addr)) {
+       if (bnx2x_is_8483x_8485x(phy) && (phy->ver_addr)) {
                /* Remove 100Mb link supported for BCM84833/4 when phy fw
                 * version lower than or equal to 1.39
                 */
@@ -13281,6 +13428,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
                break;
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858:
                /* GPIO3's are linked, and so both need to be toggled
                 * to obtain required 2us pulse.
                 */
index d9cce4c3899b7b9d388cf28a6f4bf0feece3c4b2..b7d251108c19f56345304d88097619a58b08b8e4 100644 (file)
@@ -1,13 +1,15 @@
 /* Copyright 2008-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Written by Yaniv Rosner
index c27af12314ed29ae19e73a9c00f56c062a5aa830..31c63aa2252166a4a9fb5d811735d764e8d1d082 100644 (file)
@@ -1,6 +1,8 @@
-/* bnx2x_main.c: Broadcom Everest network driver.
+/* bnx2x_main.c: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #define TX_TIMEOUT             (5*HZ)
 
 static char version[] =
-       "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
+       "QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
 MODULE_AUTHOR("Eliezer Tamir");
-MODULE_DESCRIPTION("Broadcom NetXtreme II "
+MODULE_DESCRIPTION("QLogic "
                   "BCM57710/57711/57711E/"
                   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
                   "57840/57840_MF Driver");
@@ -163,27 +165,27 @@ enum bnx2x_board_type {
 static struct {
        char *name;
 } board_info[] = {
-       [BCM57710]      = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
-       [BCM57711]      = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
-       [BCM57711E]     = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
-       [BCM57712]      = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
-       [BCM57712_MF]   = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
-       [BCM57712_VF]   = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
-       [BCM57800]      = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
-       [BCM57800_MF]   = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
-       [BCM57800_VF]   = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
-       [BCM57810]      = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
-       [BCM57810_MF]   = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
-       [BCM57810_VF]   = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
-       [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
-       [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
-       [BCM57840_MF]   = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
-       [BCM57840_VF]   = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
-       [BCM57811]      = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
-       [BCM57811_MF]   = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
-       [BCM57840_O]    = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
-       [BCM57840_MFO]  = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
-       [BCM57811_VF]   = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" }
+       [BCM57710]      = { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
+       [BCM57711]      = { "QLogic BCM57711 10 Gigabit PCIe" },
+       [BCM57711E]     = { "QLogic BCM57711E 10 Gigabit PCIe" },
+       [BCM57712]      = { "QLogic BCM57712 10 Gigabit Ethernet" },
+       [BCM57712_MF]   = { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
+       [BCM57712_VF]   = { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
+       [BCM57800]      = { "QLogic BCM57800 10 Gigabit Ethernet" },
+       [BCM57800_MF]   = { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
+       [BCM57800_VF]   = { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
+       [BCM57810]      = { "QLogic BCM57810 10 Gigabit Ethernet" },
+       [BCM57810_MF]   = { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
+       [BCM57810_VF]   = { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
+       [BCM57840_4_10] = { "QLogic BCM57840 10 Gigabit Ethernet" },
+       [BCM57840_2_20] = { "QLogic BCM57840 20 Gigabit Ethernet" },
+       [BCM57840_MF]   = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
+       [BCM57840_VF]   = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
+       [BCM57811]      = { "QLogic BCM57811 10 Gigabit Ethernet" },
+       [BCM57811_MF]   = { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
+       [BCM57840_O]    = { "QLogic BCM57840 10/20 Gigabit Ethernet" },
+       [BCM57840_MFO]  = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
+       [BCM57811_VF]   = { "QLogic BCM57811 10/20 Gigabit Ethernet Virtual Function" }
 };
 
 #ifndef PCI_DEVICE_ID_NX2_57710
@@ -2916,7 +2918,7 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
        func_params.f_obj = &bp->func_obj;
        func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
 
-       if (IS_MF_UFP(bp)) {
+       if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
                int func = BP_ABS_FUNC(bp);
                u32 val;
 
@@ -2943,16 +2945,16 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
                        BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
                                  bp->mf_ov);
                        goto fail;
+               } else {
+                       DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
+                          bp->mf_ov);
                }
-
-               DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", bp->mf_ov);
-
-               bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
-
-               return;
+       } else {
+               goto fail;
        }
 
-       /* not supported by SW yet */
+       bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
+       return;
 fail:
        bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
 }
@@ -3065,7 +3067,7 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
        storm_memset_func_en(bp, p->func_id, 1);
 
        /* spq */
-       if (p->func_flgs & FUNC_FLG_SPQ) {
+       if (p->spq_active) {
                storm_memset_spq_addr(bp, p->spq_map, p->func_id);
                REG_WR(bp, XSEM_REG_FAST_MEMORY +
                       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
@@ -3281,7 +3283,6 @@ static void bnx2x_pf_init(struct bnx2x *bp)
 {
        struct bnx2x_func_init_params func_init = {0};
        struct event_ring_data eq_data = { {0} };
-       u16 flags;
 
        if (!CHIP_IS_E1x(bp)) {
                /* reset IGU PF statistics: MSIX + ATTN */
@@ -3298,15 +3299,7 @@ static void bnx2x_pf_init(struct bnx2x *bp)
                                BP_FUNC(bp) : BP_VN(bp))*4, 0);
        }
 
-       /* function setup flags */
-       flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
-
-       /* This flag is relevant for E1x only.
-        * E2 doesn't have a TPA configuration in a function level.
-        */
-       flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0;
-
-       func_init.func_flgs = flags;
+       func_init.spq_active = true;
        func_init.pf_id = BP_FUNC(bp);
        func_init.func_id = BP_FUNC(bp);
        func_init.spq_map = bp->spq_mapping;
@@ -3707,6 +3700,34 @@ out:
           ethver, iscsiver, fcoever);
 }
 
+void bnx2x_update_mfw_dump(struct bnx2x *bp)
+{
+       struct timeval epoc;
+       u32 drv_ver;
+       u32 valid_dump;
+
+       if (!SHMEM2_HAS(bp, drv_info))
+               return;
+
+       /* Update Driver load time */
+       do_gettimeofday(&epoc);
+       SHMEM2_WR(bp, drv_info.epoc, epoc.tv_sec);
+
+       drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
+       SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
+
+       SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
+
+       /* Check & notify On-Chip dump. */
+       valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
+
+       if (valid_dump & FIRST_DUMP_VALID)
+               DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");
+
+       if (valid_dump & SECOND_DUMP_VALID)
+               DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
+}
+
 static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
 {
        u32 cmd_ok, cmd_fail;
@@ -5273,6 +5294,10 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
                else
                        vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
 
+               break;
+       case BNX2X_FILTER_VLAN_PENDING:
+               DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
+               vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
                break;
        case BNX2X_FILTER_MCAST_PENDING:
                DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
@@ -5568,6 +5593,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
                      BNX2X_STATE_OPEN):
                case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
                      BNX2X_STATE_OPENING_WAIT4_PORT):
+               case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+                     BNX2X_STATE_CLOSING_WAIT4_HALT):
                        cid = elem->message.data.eth_event.echo &
                                BNX2X_SWCID_MASK;
                        DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
@@ -5585,7 +5612,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
                      BNX2X_STATE_DIAG):
                case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
                      BNX2X_STATE_CLOSING_WAIT4_HALT):
-                       DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
+                       DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
                        bnx2x_handle_classification_eqe(bp, elem);
                        break;
 
@@ -6173,6 +6200,11 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
                __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
                __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
 
+               if (bp->accept_any_vlan) {
+                       __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+                       __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+               }
+
                break;
        case BNX2X_RX_MODE_ALLMULTI:
                __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
@@ -6184,6 +6216,11 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
                __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
                __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
 
+               if (bp->accept_any_vlan) {
+                       __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+                       __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+               }
+
                break;
        case BNX2X_RX_MODE_PROMISC:
                /* According to definition of SI mode, iface in promisc mode
@@ -6204,18 +6241,15 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
                else
                        __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
 
+               __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+               __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+
                break;
        default:
                BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
                return -EINVAL;
        }
 
-       /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
-       if (rx_mode != BNX2X_RX_MODE_NONE) {
-               __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
-               __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
-       }
-
        return 0;
 }
 
@@ -7429,6 +7463,9 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
        } else
                BNX2X_ERR("Bootcode is missing - can not initialize link\n");
 
+       if (SHMEM2_HAS(bp, netproc_fw_ver))
+               SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));
+
        return 0;
 }
 
@@ -8406,6 +8443,42 @@ int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
        return rc;
 }
 
+int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+                      struct bnx2x_vlan_mac_obj *obj, bool set,
+                      unsigned long *ramrod_flags)
+{
+       int rc;
+       struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+
+       memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+       /* Fill general parameters */
+       ramrod_param.vlan_mac_obj = obj;
+       ramrod_param.ramrod_flags = *ramrod_flags;
+
+       /* Fill a user request section if needed */
+       if (!test_bit(RAMROD_CONT, ramrod_flags)) {
+               ramrod_param.user_req.u.vlan.vlan = vlan;
+               /* Set the command: ADD or DEL */
+               if (set)
+                       ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+               else
+                       ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+       }
+
+       rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+
+       if (rc == -EEXIST) {
+               /* Do not treat adding same vlan as error. */
+               DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
+               rc = 0;
+       } else if (rc < 0) {
+               BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del"));
+       }
+
+       return rc;
+}
+
 int bnx2x_del_all_macs(struct bnx2x *bp,
                       struct bnx2x_vlan_mac_obj *mac_obj,
                       int mac_type, bool wait_for_comp)
@@ -11678,7 +11751,7 @@ static void validate_set_si_mode(struct bnx2x *bp)
 static int bnx2x_get_hwinfo(struct bnx2x *bp)
 {
        int /*abs*/func = BP_ABS_FUNC(bp);
-       int vn;
+       int vn, mfw_vn;
        u32 val = 0, val2 = 0;
        int rc = 0;
 
@@ -11768,6 +11841,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
        bp->mf_mode = 0;
        bp->mf_sub_mode = 0;
        vn = BP_VN(bp);
+       mfw_vn = BP_FW_MB_IDX(bp);
 
        if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
                BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
@@ -11824,6 +11898,31 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
                                } else
                                        BNX2X_DEV_INFO("illegal OV for SD\n");
                                break;
+                       case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE:
+                               bp->mf_mode = MULTI_FUNCTION_SD;
+                               bp->mf_sub_mode = SUB_MF_MODE_BD;
+                               bp->mf_config[vn] =
+                                       MF_CFG_RD(bp,
+                                                 func_mf_config[func].config);
+
+                               if (SHMEM2_HAS(bp, mtu_size)) {
+                                       int mtu_idx = BP_FW_MB_IDX(bp);
+                                       u16 mtu_size;
+                                       u32 mtu;
+
+                                       mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]);
+                                       mtu_size = (u16)mtu;
+                                       DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n",
+                                          mtu_size, mtu);
+
+                                       /* if valid: update device mtu */
+                                       if (((mtu_size + ETH_HLEN) >=
+                                            ETH_MIN_PACKET_SIZE) &&
+                                           (mtu_size <=
+                                            ETH_MAX_JUMBO_PACKET_SIZE))
+                                               bp->dev->mtu = mtu_size;
+                               }
+                               break;
                        case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
                                bp->mf_mode = MULTI_FUNCTION_SD;
                                bp->mf_sub_mode = SUB_MF_MODE_UFP;
@@ -11871,9 +11970,10 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
 
                                BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
                                               func, bp->mf_ov, bp->mf_ov);
-                       } else if (bp->mf_sub_mode == SUB_MF_MODE_UFP) {
+                       } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) ||
+                                  (bp->mf_sub_mode == SUB_MF_MODE_BD)) {
                                dev_err(&bp->pdev->dev,
-                                       "Unexpected - no valid MF OV for func %d in UFP mode\n",
+                                       "Unexpected - no valid MF OV for func %d in UFP/BD mode\n",
                                        func);
                                bp->path_has_ovlan = true;
                        } else {
@@ -12078,6 +12178,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
        mutex_init(&bp->drv_info_mutex);
        sema_init(&bp->stats_lock, 1);
        bp->drv_info_mng_owner = false;
+       INIT_LIST_HEAD(&bp->vlan_reg);
 
        INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
        INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -12596,6 +12697,169 @@ static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
        return vxlan_features_check(skb, features);
 }
 
+static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
+{
+       int rc;
+
+       if (IS_PF(bp)) {
+               unsigned long ramrod_flags = 0;
+
+               __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+               rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
+                                       add, &ramrod_flags);
+       } else {
+               rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
+       }
+
+       return rc;
+}
+
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+{
+       struct bnx2x_vlan_entry *vlan;
+       int rc = 0;
+
+       if (!bp->vlan_cnt) {
+               DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
+               return 0;
+       }
+
+       list_for_each_entry(vlan, &bp->vlan_reg, link) {
+               /* Prepare for cleanup in case of errors */
+               if (rc) {
+                       vlan->hw = false;
+                       continue;
+               }
+
+               if (!vlan->hw)
+                       continue;
+
+               DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
+
+               rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
+               if (rc) {
+                       BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
+                       vlan->hw = false;
+                       rc = -EINVAL;
+                       continue;
+               }
+       }
+
+       return rc;
+}
+
+static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       struct bnx2x_vlan_entry *vlan;
+       bool hw = false;
+       int rc = 0;
+
+       if (!netif_running(bp->dev)) {
+               DP(NETIF_MSG_IFUP,
+                  "Ignoring VLAN configuration the interface is down\n");
+               return -EFAULT;
+       }
+
+       DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
+
+       vlan = kmalloc(sizeof(*vlan), GFP_KERNEL);
+       if (!vlan)
+               return -ENOMEM;
+
+       bp->vlan_cnt++;
+       if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
+               DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
+               bp->accept_any_vlan = true;
+               if (IS_PF(bp))
+                       bnx2x_set_rx_mode_inner(bp);
+               else
+                       bnx2x_vfpf_storm_rx_mode(bp);
+       } else if (bp->vlan_cnt <= bp->vlan_credit) {
+               rc = __bnx2x_vlan_configure_vid(bp, vid, true);
+               hw = true;
+       }
+
+       vlan->vid = vid;
+       vlan->hw = hw;
+
+       if (!rc) {
+               list_add(&vlan->link, &bp->vlan_reg);
+       } else {
+               bp->vlan_cnt--;
+               kfree(vlan);
+       }
+
+       DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
+
+       return rc;
+}
+
+static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       struct bnx2x_vlan_entry *vlan;
+       int rc = 0;
+
+       if (!netif_running(bp->dev)) {
+               DP(NETIF_MSG_IFUP,
+                  "Ignoring VLAN configuration the interface is down\n");
+               return -EFAULT;
+       }
+
+       DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
+
+       if (!bp->vlan_cnt) {
+               BNX2X_ERR("Unable to kill VLAN %d\n", vid);
+               return -EINVAL;
+       }
+
+       list_for_each_entry(vlan, &bp->vlan_reg, link)
+               if (vlan->vid == vid)
+                       break;
+
+       if (vlan->vid != vid) {
+               BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
+               return -EINVAL;
+       }
+
+       if (vlan->hw)
+               rc = __bnx2x_vlan_configure_vid(bp, vid, false);
+
+       list_del(&vlan->link);
+       kfree(vlan);
+
+       bp->vlan_cnt--;
+
+       if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
+               /* Configure all non-configured entries */
+               list_for_each_entry(vlan, &bp->vlan_reg, link) {
+                       if (vlan->hw)
+                               continue;
+
+                       rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
+                       if (rc) {
+                               BNX2X_ERR("Unable to config VLAN %d\n",
+                                         vlan->vid);
+                               continue;
+                       }
+                       DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
+                          vlan->vid);
+                       vlan->hw = true;
+               }
+               DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
+               bp->accept_any_vlan = false;
+               if (IS_PF(bp))
+                       bnx2x_set_rx_mode_inner(bp);
+               else
+                       bnx2x_vfpf_storm_rx_mode(bp);
+       }
+
+       DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
+
+       return rc;
+}
+
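Condensing the add/kill paths above: HW VLAN filter slots are spent one per VID until vlan_credit is exhausted, the device then falls back to accepting any VLAN, and the fallback is undone once enough VIDs are removed. A sketch of the decision, not driver code:

/* Sketch only: the credit test both paths above revolve around. */
static bool vlan_needs_any_vlan_fallback(int vlan_cnt, int vlan_credit)
{
	return vlan_cnt > vlan_credit;	/* out of HW filter slots */
}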
 static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_open               = bnx2x_open,
        .ndo_stop               = bnx2x_close,
@@ -12609,6 +12873,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_fix_features       = bnx2x_fix_features,
        .ndo_set_features       = bnx2x_set_features,
        .ndo_tx_timeout         = bnx2x_tx_timeout,
+       .ndo_vlan_rx_add_vid    = bnx2x_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = bnx2x_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = poll_bnx2x,
 #endif
@@ -12819,6 +13085,18 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
 
+       /* VF with OLD Hypervisor or old PF do not support filtering */
+       if (IS_PF(bp)) {
+               if (CHIP_IS_E1x(bp))
+                       bp->accept_any_vlan = true;
+               else
+                       dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#ifdef CONFIG_BNX2X_SRIOV
+       } else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
+               dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#endif
+       }
+
        dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
        dev->features |= NETIF_F_HIGHDMA;
 
@@ -13561,6 +13839,9 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 
        bnx2x_register_phc(bp);
 
+       if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+               bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
+
        return 0;
 
 init_one_exit:
@@ -13623,6 +13904,7 @@ static void __bnx2x_remove(struct pci_dev *pdev,
        /* Power on: we can't let PCI layer write to us while we are in D3 */
        if (IS_PF(bp)) {
                bnx2x_set_power_state(bp, PCI_D0);
+               bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED);
 
                /* Set endianity registers to reset values in case next driver
                 * boots in different endianty environment.
index caf1aef651eb0bd38081887b68315597c2a5dc16..a91ccbf363451585bfd51e4203180fa45b9f8bcd 100644 (file)
@@ -1,6 +1,8 @@
-/* bnx2x_mfw_req.h: Broadcom Everest network driver.
+/* bnx2x_mfw_req.h: QLogic Everest network driver.
  *
  * Copyright (c) 2012-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 49d511092c82fc514832fc4aa30078a08a6f19ad..4dead49bd5cb0866ff89db2c5f2e149536b1ea3c 100644 (file)
@@ -1,6 +1,8 @@
-/* bnx2x_reg.h: Broadcom Everest network driver.
+/* bnx2x_reg.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 /* [RW 1] When this bit is set; the LLH will expect all packets to be with
    e1hov */
 #define NIG_REG_LLH_E1HOV_MODE                                  0x160d8
+/* [RW 16] Outer VLAN type identifier for multi-function mode. In non
+ * multi-function mode; it will hold the inner VLAN type. Typically 0x8100.
+ */
+#define NIG_REG_LLH_E1HOV_TYPE_1                                0x16028
 /* [RW 1] When this bit is set; the LLH will classify the packet before
    sending it to the BRB or calculating WoL on it. */
 #define NIG_REG_LLH_MF_MODE                                     0x16024
 #define PBF_REG_TQ_OCCUPANCY_Q0                                         0x1403ac
 /* [R 13] Number of 8 bytes lines occupied in the task queue of queue 1. */
 #define PBF_REG_TQ_OCCUPANCY_Q1                                         0x1403b0
-#define PB_REG_CONTROL                                          0
+/* [RW 16] One of 8 values that should be compared to type in Ethernet
+ * parsing. If there is a match; the field after Ethernet is the first VLAN.
+ * Reset value is 0x8100 which is the standard VLAN type. Note that when
+ * checking second VLAN; type is compared only to 0x8100.
+ */
+#define PBF_REG_VLAN_TYPE_0                                     0x15c06c
 /* [RW 2] Interrupt mask register #0 read/write */
 #define PB_REG_PB_INT_MASK                                      0x28
 /* [R 2] Interrupt register #0 read */
 #define PRS_REG_TCM_CURRENT_CREDIT                              0x40160
 /* [R 8] debug only: TSDM current credit. Transaction based. */
 #define PRS_REG_TSDM_CURRENT_CREDIT                             0x4015c
+/* [RW 16] One of 8 values that should be compared to type in Ethernet
+ * parsing. If there is a match; the field after Ethernet is the first VLAN.
+ * Reset value is 0x8100 which is the standard VLAN type. Note that when
+ * checking second VLAN; type is compared only to 0x8100.
+ */
+#define PRS_REG_VLAN_TYPE_0                                     0x401a8
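As a hedged example of the new VLAN-type match registers, a sketch that programs the standard TPID into the first parser and PBF entries; REG_WR is the driver's register write helper, and the value mirrors the documented reset default:

/* Illustration only: restore the documented reset value (0x8100). */
static void set_default_vlan_type(struct bnx2x *bp)
{
	REG_WR(bp, PRS_REG_VLAN_TYPE_0, 0x8100);
	REG_WR(bp, PBF_REG_VLAN_TYPE_0, 0x8100);
}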
 #define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT                    (0x1<<19)
 #define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF                     (0x1<<20)
 #define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN                  (0x1<<22)
@@ -7240,6 +7257,9 @@ The other bits are reserved and should be zero*/
 #define MDIO_AN_REG_8481_LEGACY_MII_CTRL       0xffe0
 #define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G     0x40
 #define MDIO_AN_REG_8481_LEGACY_MII_STATUS     0xffe1
+#define MDIO_AN_REG_848xx_ID_MSB               0xffe2
+#define BCM84858_PHY_ID                                        0x600d
+#define MDIO_AN_REG_848xx_ID_LSB               0xffe3
 #define MDIO_AN_REG_8481_LEGACY_AN_ADV         0xffe4
 #define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION   0xffe6
 #define MDIO_AN_REG_8481_1000T_CTRL            0xffe9
@@ -7283,31 +7303,31 @@ The other bits are reserved and should be zero*/
 #define MDIO_84833_TOP_CFG_FW_NO_EEE           0x1f81
 #define MDIO_84833_TOP_CFG_XGPHY_STRAP1                        0x401a
 #define MDIO_84833_SUPER_ISOLATE               0x8000
-/* These are mailbox register set used by 84833. */
-#define MDIO_84833_TOP_CFG_SCRATCH_REG0                        0x4005
-#define MDIO_84833_TOP_CFG_SCRATCH_REG1                        0x4006
-#define MDIO_84833_TOP_CFG_SCRATCH_REG2                        0x4007
-#define MDIO_84833_TOP_CFG_SCRATCH_REG3                        0x4008
-#define MDIO_84833_TOP_CFG_SCRATCH_REG4                        0x4009
-#define MDIO_84833_TOP_CFG_SCRATCH_REG26               0x4037
-#define MDIO_84833_TOP_CFG_SCRATCH_REG27               0x4038
-#define MDIO_84833_TOP_CFG_SCRATCH_REG28               0x4039
-#define MDIO_84833_TOP_CFG_SCRATCH_REG29               0x403a
-#define MDIO_84833_TOP_CFG_SCRATCH_REG30               0x403b
-#define MDIO_84833_TOP_CFG_SCRATCH_REG31               0x403c
-#define MDIO_84833_CMD_HDLR_COMMAND    MDIO_84833_TOP_CFG_SCRATCH_REG0
-#define MDIO_84833_CMD_HDLR_STATUS     MDIO_84833_TOP_CFG_SCRATCH_REG26
-#define MDIO_84833_CMD_HDLR_DATA1      MDIO_84833_TOP_CFG_SCRATCH_REG27
-#define MDIO_84833_CMD_HDLR_DATA2      MDIO_84833_TOP_CFG_SCRATCH_REG28
-#define MDIO_84833_CMD_HDLR_DATA3      MDIO_84833_TOP_CFG_SCRATCH_REG29
-#define MDIO_84833_CMD_HDLR_DATA4      MDIO_84833_TOP_CFG_SCRATCH_REG30
-#define MDIO_84833_CMD_HDLR_DATA5      MDIO_84833_TOP_CFG_SCRATCH_REG31
+/* These are mailbox register set used by 84833/84858. */
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG0                        0x4005
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG1                        0x4006
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG2                        0x4007
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG3                        0x4008
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG4                        0x4009
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG26               0x4037
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG27               0x4038
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG28               0x4039
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG29               0x403a
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG30               0x403b
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG31               0x403c
+#define MDIO_848xx_CMD_HDLR_COMMAND    (MDIO_848xx_TOP_CFG_SCRATCH_REG0)
+#define MDIO_848xx_CMD_HDLR_STATUS     (MDIO_848xx_TOP_CFG_SCRATCH_REG26)
+#define MDIO_848xx_CMD_HDLR_DATA1      (MDIO_848xx_TOP_CFG_SCRATCH_REG27)
+#define MDIO_848xx_CMD_HDLR_DATA2      (MDIO_848xx_TOP_CFG_SCRATCH_REG28)
+#define MDIO_848xx_CMD_HDLR_DATA3      (MDIO_848xx_TOP_CFG_SCRATCH_REG29)
+#define MDIO_848xx_CMD_HDLR_DATA4      (MDIO_848xx_TOP_CFG_SCRATCH_REG30)
+#define MDIO_848xx_CMD_HDLR_DATA5      (MDIO_848xx_TOP_CFG_SCRATCH_REG31)
 
-/* Mailbox command set used by 84833. */
-#define PHY84833_CMD_SET_PAIR_SWAP                     0x8001
-#define PHY84833_CMD_GET_EEE_MODE                      0x8008
-#define PHY84833_CMD_SET_EEE_MODE                      0x8009
-/* Mailbox status set used by 84833. */
+/* Mailbox command set used by 84833/84858 */
+#define PHY848xx_CMD_SET_PAIR_SWAP                     0x8001
+#define PHY848xx_CMD_GET_EEE_MODE                      0x8008
+#define PHY848xx_CMD_SET_EEE_MODE                      0x8009
+/* Mailbox status set used by 84833 only */
 #define PHY84833_STATUS_CMD_RECEIVED                   0x0001
 #define PHY84833_STATUS_CMD_IN_PROGRESS                        0x0002
 #define PHY84833_STATUS_CMD_COMPLETE_PASS              0x0004
@@ -7318,6 +7338,13 @@ The other bits are reserved and should be zero*/
 #define PHY84833_STATUS_CMD_CLEAR_COMPLETE             0x0080
 #define PHY84833_STATUS_CMD_OPEN_OVERRIDE              0xa5a5
 
+/* Mailbox status set used by 84858 only */
+#define PHY84858_STATUS_CMD_RECEIVED                   0x0001
+#define PHY84858_STATUS_CMD_IN_PROGRESS                        0x0002
+#define PHY84858_STATUS_CMD_COMPLETE_PASS              0x0004
+#define PHY84858_STATUS_CMD_COMPLETE_ERROR             0x0008
+#define PHY84858_STATUS_CMD_SYSTEM_BUSY                        0xbbbb
+
 
 /* Warpcore clause 45 addressing */
 #define MDIO_WC_DEVAD                                  0x3
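A hedged sketch of the command/status handshake implied by the 848xx mailbox registers above, modelled on the driver's existing 84833 command handler: bnx2x_cl45_read()/bnx2x_cl45_write() are the driver's clause-45 MDIO helpers, while the poll budget and sleep range below are assumptions, not values taken from this patch.

static int bnx2x_848xx_run_cmd(struct bnx2x *bp, struct bnx2x_phy *phy,
                               u16 cmd)
{
        u16 val;
        int i;

        /* Fire the command through the scratch-register mailbox */
        bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
                         MDIO_848xx_CMD_HDLR_COMMAND, cmd);

        /* Poll the status scratch register for a terminal state */
        for (i = 0; i < 300; i++) {
                bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
                                MDIO_848xx_CMD_HDLR_STATUS, &val);
                if (val == PHY84858_STATUS_CMD_COMPLETE_PASS)
                        return 0;
                if (val == PHY84858_STATUS_CMD_COMPLETE_ERROR)
                        return -EINVAL;
                usleep_range(1000, 2000);
        }
        return -ETIMEDOUT;
}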
index 4ad415ac8cfe4a56ffd00858d1f70d9f1ab01456..c9bd7f16018e7616b8cedf03c78c41dce058aa79 100644 (file)
@@ -1,15 +1,17 @@
-/* bnx2x_sp.c: Broadcom Everest network driver.
+/* bnx2x_sp.c: QLogic Everest network driver.
  *
- * Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -355,6 +357,23 @@ static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
 
        return vp->get(vp, 1);
 }
+
+static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+       struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+       struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+       if (!mp->get(mp, 1))
+               return false;
+
+       if (!vp->get(vp, 1)) {
+               mp->put(mp, 1);
+               return false;
+       }
+
+       return true;
+}
+
 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
 {
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;
@@ -383,6 +402,22 @@ static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
        return vp->put(vp, 1);
 }
 
+static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+       struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+       struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+       if (!mp->put(mp, 1))
+               return false;
+
+       if (!vp->put(vp, 1)) {
+               mp->get(mp, 1);
+               return false;
+       }
+
+       return true;
+}
+
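/* The get/put pair above forms a small two-resource transaction: take the
 * MAC credit, and if the VLAN credit cannot be taken, return the MAC
 * credit so neither pool leaks. A standalone sketch of the same pattern
 * (all names here are illustrative, not driver API):
 */
struct pool { int avail; };

static bool pool_get(struct pool *p, int n)
{
        if (p->avail < n)
                return false;
        p->avail -= n;
        return true;
}

static void pool_put(struct pool *p, int n)
{
        p->avail += n;
}

static bool take_both(struct pool *a, struct pool *b)
{
        if (!pool_get(a, 1))
                return false;
        if (!pool_get(b, 1)) {
                pool_put(a, 1); /* roll back the first take */
                return false;
        }
        return true;
}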
 /**
  * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
  *
@@ -636,6 +671,26 @@ static int bnx2x_check_vlan_add(struct bnx2x *bp,
        return 0;
 }
 
+static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
+                                   struct bnx2x_vlan_mac_obj *o,
+                                  union bnx2x_classification_ramrod_data *data)
+{
+       struct bnx2x_vlan_mac_registry_elem *pos;
+
+       DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
+          data->vlan_mac.mac, data->vlan_mac.vlan);
+
+       list_for_each_entry(pos, &o->head, link)
+               if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+                   (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+                                 ETH_ALEN)) &&
+                   (data->vlan_mac.is_inner_mac ==
+                    pos->u.vlan_mac.is_inner_mac))
+                       return -EEXIST;
+
+       return 0;
+}
+
 /* check_del() callbacks */
 static struct bnx2x_vlan_mac_registry_elem *
        bnx2x_check_mac_del(struct bnx2x *bp,
@@ -670,6 +725,27 @@ static struct bnx2x_vlan_mac_registry_elem *
        return NULL;
 }
 
+static struct bnx2x_vlan_mac_registry_elem *
+       bnx2x_check_vlan_mac_del(struct bnx2x *bp,
+                                struct bnx2x_vlan_mac_obj *o,
+                                union bnx2x_classification_ramrod_data *data)
+{
+       struct bnx2x_vlan_mac_registry_elem *pos;
+
+       DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
+          data->vlan_mac.mac, data->vlan_mac.vlan);
+
+       list_for_each_entry(pos, &o->head, link)
+               if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+                   (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+                            ETH_ALEN)) &&
+                   (data->vlan_mac.is_inner_mac ==
+                    pos->u.vlan_mac.is_inner_mac))
+                       return pos;
+
+       return NULL;
+}
+
 /* check_move() callback */
 static bool bnx2x_check_move(struct bnx2x *bp,
                             struct bnx2x_vlan_mac_obj *src_o,
@@ -1036,6 +1112,96 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
                                        rule_cnt);
 }
 
+static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
+                                     struct bnx2x_vlan_mac_obj *o,
+                                     struct bnx2x_exeq_elem *elem,
+                                     int rule_idx, int cam_offset)
+{
+       struct bnx2x_raw_obj *raw = &o->raw;
+       struct eth_classify_rules_ramrod_data *data =
+               (struct eth_classify_rules_ramrod_data *)(raw->rdata);
+       int rule_cnt = rule_idx + 1;
+       union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+       enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
+       bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+       u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
+       u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
+       u16 inner_mac;
+
+       /* Reset the ramrod data buffer for the first rule */
+       if (rule_idx == 0)
+               memset(data, 0, sizeof(*data));
+
+       /* Set a rule header */
+       bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
+                                     &rule_entry->pair.header);
+
+       /* Set VLAN and MAC themselves */
+       rule_entry->pair.vlan = cpu_to_le16(vlan);
+       bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+                             &rule_entry->pair.mac_mid,
+                             &rule_entry->pair.mac_lsb, mac);
+       inner_mac = elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
+       rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
+       /* MOVE: Add a rule that will add this MAC/VLAN to the target Queue */
+       if (cmd == BNX2X_VLAN_MAC_MOVE) {
+               struct bnx2x_vlan_mac_obj *target_obj;
+
+               rule_entry++;
+               rule_cnt++;
+
+               /* Setup ramrod data */
+               target_obj = elem->cmd_data.vlan_mac.target_obj;
+               bnx2x_vlan_mac_set_cmd_hdr_e2(bp, target_obj,
+                                             true, CLASSIFY_RULE_OPCODE_PAIR,
+                                             &rule_entry->pair.header);
+
+               /* Set a VLAN itself */
+               rule_entry->pair.vlan = cpu_to_le16(vlan);
+               bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+                                     &rule_entry->pair.mac_mid,
+                                     &rule_entry->pair.mac_lsb, mac);
+               rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
+       }
+
+       /* Set the ramrod data header */
+       bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+                                       rule_cnt);
+}
+
+/**
+ * bnx2x_set_one_vlan_mac_e1h - set/clear one VLAN-MAC pair in the legacy CAM
+ *
+ * @bp:                device handle
+ * @o:         vlan_mac object the rule belongs to
+ * @elem:      execution queue element carrying the ADD/DEL command
+ * @rule_idx:  index of the rule within the ramrod data
+ * @cam_offset:        CAM line to configure for this pair
+ */
+static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
+                                      struct bnx2x_vlan_mac_obj *o,
+                                      struct bnx2x_exeq_elem *elem,
+                                      int rule_idx, int cam_offset)
+{
+       struct bnx2x_raw_obj *raw = &o->raw;
+       struct mac_configuration_cmd *config =
+               (struct mac_configuration_cmd *)(raw->rdata);
+       /* 57710 and 57711 do not support MOVE command,
+        * so it's either ADD or DEL
+        */
+       bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+               true : false;
+
+       /* Reset the ramrod data buffer */
+       memset(config, 0, sizeof(*config));
+
+       bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
+                                    cam_offset, add,
+                                    elem->cmd_data.vlan_mac.u.vlan_mac.mac,
+                                    elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
+                                    ETH_VLAN_FILTER_CLASSIFY, config);
+}
+
 /**
  * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
  *
@@ -1135,6 +1301,25 @@ static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
        return NULL;
 }
 
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
+       struct bnx2x_exe_queue_obj *o,
+       struct bnx2x_exeq_elem *elem)
+{
+       struct bnx2x_exeq_elem *pos;
+       struct bnx2x_vlan_mac_ramrod_data *data =
+               &elem->cmd_data.vlan_mac.u.vlan_mac;
+
+       /* Check pending for execution commands */
+       list_for_each_entry(pos, &o->exe_queue, link)
+               if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
+                           sizeof(*data)) &&
+                   (pos->cmd_data.vlan_mac.cmd ==
+                    elem->cmd_data.vlan_mac.cmd))
+                       return pos;
+
+       return NULL;
+}
+
 /**
  * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
  *
@@ -2042,6 +2227,68 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
        }
 }
 
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+                            struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+                            u8 cl_id, u32 cid, u8 func_id, void *rdata,
+                            dma_addr_t rdata_mapping, int state,
+                            unsigned long *pstate, bnx2x_obj_type type,
+                            struct bnx2x_credit_pool_obj *macs_pool,
+                            struct bnx2x_credit_pool_obj *vlans_pool)
+{
+       union bnx2x_qable_obj *qable_obj =
+               (union bnx2x_qable_obj *)vlan_mac_obj;
+
+       bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
+                                  rdata_mapping, state, pstate, type,
+                                  macs_pool, vlans_pool);
+
+       /* CAM pool handling */
+       vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
+       vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
+       /* CAM offset is relevant only for the 57710 and 57711 chips, which
+        * have a single CAM for both MACs and VLAN-MAC pairs. So the offset
+        * will be taken from the MACs' pool object only.
+        */
+       vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+       vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+       if (CHIP_IS_E1(bp)) {
+               BNX2X_ERR("Do not support chips others than E2\n");
+               BUG();
+       } else if (CHIP_IS_E1H(bp)) {
+               vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e1h;
+               vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
+               vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
+               vlan_mac_obj->check_move        = bnx2x_check_move_always_err;
+               vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
+
+               /* Exe Queue */
+               bnx2x_exe_queue_init(bp,
+                                    &vlan_mac_obj->exe_queue, 1, qable_obj,
+                                    bnx2x_validate_vlan_mac,
+                                    bnx2x_remove_vlan_mac,
+                                    bnx2x_optimize_vlan_mac,
+                                    bnx2x_execute_vlan_mac,
+                                    bnx2x_exeq_get_vlan_mac);
+       } else {
+               vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e2;
+               vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
+               vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
+               vlan_mac_obj->check_move        = bnx2x_check_move;
+               vlan_mac_obj->ramrod_cmd        =
+                       RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+               /* Exe Queue */
+               bnx2x_exe_queue_init(bp,
+                                    &vlan_mac_obj->exe_queue,
+                                    CLASSIFY_RULES_COUNT,
+                                    qable_obj, bnx2x_validate_vlan_mac,
+                                    bnx2x_remove_vlan_mac,
+                                    bnx2x_optimize_vlan_mac,
+                                    bnx2x_execute_vlan_mac,
+                                    bnx2x_exeq_get_vlan_mac);
+       }
+}
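/* Illustrative sketch (the helper name is hypothetical): once the object
 * has been initialized with both pools, ADD/DEL requests go through the
 * generic bnx2x_config_vlan_mac() entry point, mirroring how
 * bnx2x_set_vf_vlan_filter() in bnx2x_sriov.c drives plain VLANs.
 */
static int bnx2x_add_vlan_mac_pair(struct bnx2x *bp,
                                   struct bnx2x_vlan_mac_obj *obj,
                                   const u8 *mac, u16 vlan)
{
        struct bnx2x_vlan_mac_ramrod_params ramrod;

        memset(&ramrod, 0, sizeof(ramrod));
        __set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
        ramrod.vlan_mac_obj = obj;
        ramrod.user_req.cmd = BNX2X_VLAN_MAC_ADD;
        ramrod.user_req.u.vlan_mac.vlan = vlan;
        memcpy(ramrod.user_req.u.vlan_mac.mac, mac, ETH_ALEN);

        return bnx2x_config_vlan_mac(bp, &ramrod);
}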
 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
                        struct tstorm_eth_mac_filter_config *mac_filters,
@@ -3854,8 +4101,8 @@ static bool bnx2x_credit_pool_get_entry_always_true(
  * If credit is negative pool operations will always succeed (unlimited pool).
  *
  */
-static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
-                                         int base, int credit)
+void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+                           int base, int credit)
 {
        /* Zero the object first */
        memset(p, 0, sizeof(*p));
@@ -3934,9 +4181,9 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
                /* CAM credit is equally divided between all active functions
                 * on the PATH.
                 */
-               if ((func_num > 0)) {
+               if (func_num > 0) {
                        if (!CHIP_REV_IS_SLOW(bp))
-                               cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
+                               cam_sz = PF_MAC_CREDIT_E2(bp, func_num);
                        else
                                cam_sz = BNX2X_CAM_SIZE_EMUL;
 
@@ -3966,8 +4213,9 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
                 * on the PATH.
                 */
                if (func_num > 0) {
-                       int credit = MAX_VLAN_CREDIT_E2 / func_num;
-                       bnx2x_init_credit_pool(p, func_id * credit, credit);
+                       int credit = PF_VLAN_CREDIT_E2(bp, func_num);
+
+                       bnx2x_init_credit_pool(p, -1 /* unused for E2 */, credit);
                } else
                        /* this should never happen! Block VLAN operations. */
                        bnx2x_init_credit_pool(p, 0, 0);
@@ -4060,8 +4308,14 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
        if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
                caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
 
-       if (test_bit(BNX2X_RSS_GRE_INNER_HDRS, &p->rss_flags))
-               caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY;
+       if (test_bit(BNX2X_RSS_IPV4_VXLAN, &p->rss_flags))
+               caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY;
+
+       if (test_bit(BNX2X_RSS_IPV6_VXLAN, &p->rss_flags))
+               caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY;
+
+       if (test_bit(BNX2X_RSS_TUNN_INNER_HDRS, &p->rss_flags))
+               caps |= ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY;
 
        /* RSS keys */
        if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
@@ -5669,10 +5923,14 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
        rdata->sd_vlan_tag      = cpu_to_le16(start_params->sd_vlan_tag);
        rdata->path_id          = BP_PATH(bp);
        rdata->network_cos_mode = start_params->network_cos_mode;
-       rdata->tunnel_mode      = start_params->tunnel_mode;
-       rdata->gre_tunnel_type  = start_params->gre_tunnel_type;
-       rdata->inner_gre_rss_en = start_params->inner_gre_rss_en;
-       rdata->vxlan_dst_port   = cpu_to_le16(4789);
+
+       rdata->vxlan_dst_port   = cpu_to_le16(start_params->vxlan_dst_port);
+       rdata->geneve_dst_port  = cpu_to_le16(start_params->geneve_dst_port);
+       rdata->inner_clss_l2gre = start_params->inner_clss_l2gre;
+       rdata->inner_clss_l2geneve = start_params->inner_clss_l2geneve;
+       rdata->inner_clss_vxlan = start_params->inner_clss_vxlan;
+       rdata->inner_rss        = start_params->inner_rss;
+
        rdata->sd_accept_mf_clss_fail = start_params->class_fail;
        if (start_params->class_fail_ethtype) {
                rdata->sd_accept_mf_clss_fail_match_ethtype = 1;
@@ -5690,6 +5948,14 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
                        cpu_to_le16(0x8100);
 
        rdata->no_added_tags = start_params->no_added_tags;
+
+       rdata->c2s_pri_tt_valid = start_params->c2s_pri_valid;
+       if (rdata->c2s_pri_tt_valid) {
+               memcpy(rdata->c2s_pri_trans_table.val,
+                      start_params->c2s_pri,
+                      MAX_VLAN_PRIORITIES);
+               rdata->c2s_pri_default = start_params->c2s_pri_default;
+       }
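/* Illustrative sketch: one plausible way a caller could fill the new
 * inner-to-outer (c2s) vlan priority map consumed above -- an identity
 * mapping with default priority 0 (the values are assumptions, not taken
 * from this patch).
 */
static void bnx2x_c2s_identity_map(struct bnx2x_func_start_params *p)
{
        int i;

        for (i = 0; i < MAX_VLAN_PRIORITIES; i++)
                p->c2s_pri[i] = i;      /* inner PCP i -> outer PCP i */
        p->c2s_pri_default = 0;
        p->c2s_pri_valid = 1;
}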
        /* No need for an explicit memory barrier here as long we would
         * need to ensure the ordering of writing to the SPQ element
         * and updating of the SPQ producer which involves a memory
@@ -5750,15 +6016,22 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
        if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
                     &switch_update_params->changes)) {
                rdata->update_tunn_cfg_flg = 1;
-               if (test_bit(BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
+               if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
+                            &switch_update_params->changes))
+                       rdata->inner_clss_l2gre = 1;
+               if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
+                            &switch_update_params->changes))
+                       rdata->inner_clss_vxlan = 1;
+               if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
                             &switch_update_params->changes))
-                       rdata->tunn_clss_en = 1;
-               if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
+                       rdata->inner_clss_l2geneve = 1;
+               if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
                             &switch_update_params->changes))
-                       rdata->inner_gre_rss_en = 1;
-               rdata->tunnel_mode = switch_update_params->tunnel_mode;
-               rdata->gre_tunnel_type = switch_update_params->gre_tunnel_type;
-               rdata->vxlan_dst_port = cpu_to_le16(4789);
+                       rdata->inner_rss = 1;
+               rdata->vxlan_dst_port =
+                       cpu_to_le16(switch_update_params->vxlan_dst_port);
+               rdata->geneve_dst_port =
+                       cpu_to_le16(switch_update_params->geneve_dst_port);
        }
 
        rdata->echo = SWITCH_UPDATE;
@@ -5885,6 +6158,8 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
                rdata->traffic_type_to_priority_cos[i] =
                        tx_start_params->traffic_type_to_priority_cos[i];
 
+       for (i = 0; i < MAX_TRAFFIC_TYPES; i++)
+               rdata->dcb_outer_pri[i] = tx_start_params->dcb_outer_pri[i];
        /* No need for an explicit memory barrier here as long as we
         * ensure the ordering of writing to the SPQ element
         * and updating of the SPQ producer which involves a memory
index 86baecb7c60c41a3cf096ad884efe674fb9221b3..4048fc594cce53f183d7c4d840dd4a0b38c80404 100644 (file)
@@ -1,15 +1,17 @@
-/* bnx2x_sp.h: Broadcom Everest network driver.
+/* bnx2x_sp.h: QLogic Everest network driver.
  *
- * Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -711,7 +713,10 @@ enum {
        BNX2X_RSS_IPV6,
        BNX2X_RSS_IPV6_TCP,
        BNX2X_RSS_IPV6_UDP,
-       BNX2X_RSS_GRE_INNER_HDRS,
+
+       BNX2X_RSS_IPV4_VXLAN,
+       BNX2X_RSS_IPV6_VXLAN,
+       BNX2X_RSS_TUNN_INNER_HDRS,
 };
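/* Illustrative sketch: enabling the new tunnel-aware RSS bits on a
 * configuration request (rss_flags is the flags word of the
 * bnx2x_config_rss_params structure declared below).
 */
static inline void bnx2x_rss_enable_vxlan(struct bnx2x_config_rss_params *p)
{
        __set_bit(BNX2X_RSS_IPV4_VXLAN, &p->rss_flags);
        __set_bit(BNX2X_RSS_IPV6_VXLAN, &p->rss_flags);
        __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &p->rss_flags);
}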
 
 struct bnx2x_config_rss_params {
@@ -1105,8 +1110,10 @@ enum {
        BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
        BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
        BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
-       BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
-       BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
+       BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
+       BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
+       BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
+       BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
 };
 
 /* Allowed Function states */
@@ -1171,19 +1178,23 @@ struct bnx2x_func_start_params {
        /* Function cos mode */
        u8 network_cos_mode;
 
-       /* TUNN_MODE_NONE/TUNN_MODE_VXLAN/TUNN_MODE_GRE */
-       u8 tunnel_mode;
+       /* UDP dest port for VXLAN */
+       u16 vxlan_dst_port;
 
-       /* tunneling classification enablement */
-       u8 tunn_clss_en;
+       /* UDP dest port for Geneve */
+       u16 geneve_dst_port;
 
-       /* NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
-       u8 gre_tunnel_type;
+       /* Enable inner Rx classification for L2GRE packets */
+       u8 inner_clss_l2gre;
 
-       /* Enables Inner GRE RSS on the function, depends on the client RSS
-        * capailities
-        */
-       u8 inner_gre_rss_en;
+       /* Enable inner Rx classification for L2-Geneve packets */
+       u8 inner_clss_l2geneve;
+
+       /* Enable inner Rx classification for VXLAN packets */
+       u8 inner_clss_vxlan;
+
+       /* Enable RSS according to inner header */
+       u8 inner_rss;
 
        /* Allows accepting of packets failing MF classification, possibly
         * only matching a given ethertype
@@ -1200,6 +1211,11 @@ struct bnx2x_func_start_params {
 
        /* Prevent inner vlans from being added by FW */
        u8 no_added_tags;
+
+       /* Inner-to-Outer vlan priority mapping */
+       u8 c2s_pri[MAX_VLAN_PRIORITIES];
+       u8 c2s_pri_default;
+       u8 c2s_pri_valid;
 };
 
 struct bnx2x_func_switch_update_params {
@@ -1207,8 +1223,8 @@ struct bnx2x_func_switch_update_params {
        u16 vlan;
        u16 vlan_eth_type;
        u8 vlan_force_prio;
-       u8 tunnel_mode;
-       u8 gre_tunnel_type;
+       u16 vxlan_dst_port;
+       u16 geneve_dst_port;
 };
 
 struct bnx2x_func_afex_update_params {
@@ -1229,6 +1245,7 @@ struct bnx2x_func_tx_start_params {
        u8 dcb_enabled;
        u8 dcb_version;
        u8 dont_add_pri_0_en;
+       u8 dcb_outer_pri[MAX_TRAFFIC_TYPES];
 };
 
 struct bnx2x_func_set_timesync_params {
@@ -1396,6 +1413,14 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
                         unsigned long *pstate, bnx2x_obj_type type,
                         struct bnx2x_credit_pool_obj *vlans_pool);
 
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+                            struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+                            u8 cl_id, u32 cid, u8 func_id, void *rdata,
+                            dma_addr_t rdata_mapping, int state,
+                            unsigned long *pstate, bnx2x_obj_type type,
+                            struct bnx2x_credit_pool_obj *macs_pool,
+                            struct bnx2x_credit_pool_obj *vlans_pool);
+
 int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
                                        struct bnx2x_vlan_mac_obj *o);
 void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
@@ -1466,6 +1491,8 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
                                 struct bnx2x_credit_pool_obj *p, u8 func_id,
                                 u8 func_num);
+void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+                           int base, int credit);
 
 /****************** RSS CONFIGURATION ****************/
 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
@@ -1493,4 +1520,12 @@ int bnx2x_config_rss(struct bnx2x *bp,
 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
                             u8 *ind_table);
 
+#define PF_MAC_CREDIT_E2(bp, func_num)                                 \
+       ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \
+        func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT)
+
+#define PF_VLAN_CREDIT_E2(bp, func_num)                                        \
+       ((MAX_VLAN_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) / \
+        func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT)
+
 #endif /* BNX2X_SP_VERBS */
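A worked example of the split the two credit macros encode; the concrete numbers below (CAM size 272, 64 VFs per path, 4 PFs, 16 VFs behind this PF) are assumptions chosen for the arithmetic, not values taken from this patch:

/*
 * PF_MAC_CREDIT_E2 with MAX_MAC_CREDIT_E2 = 272 and VF_MAC_CREDIT_CNT = 1:
 *
 *   path-wide VF reservation = 64 * 1         = 64 entries
 *   equal PF share           = (272 - 64) / 4 = 52 entries
 *   top-up for this PF's VFs = 16 * 1         = 16 entries
 *   total PF credit          = 52 + 16        = 68 entries
 *
 * Each PF thus gets an equal slice of whatever the maximal VF reservation
 * leaves over, plus the credits its own VFs will actually consume.
 */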
index f67348d169667376d47f082fc2da1eaf58843f1e..9d027348cd09b90fcca14843f13e278d54dd89e6 100644 (file)
@@ -1,15 +1,17 @@
-/* bnx2x_sriov.c: Broadcom Everest network driver.
+/* bnx2x_sriov.c: QLogic Everest network driver.
  *
  * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -195,14 +197,6 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
        setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
        setup_p->gen_params.fp_hsi = vf->fp_hsi;
 
-       /* Setup-op pause params:
-        * Nothing to do, the pause thresholds are set by default to 0 which
-        * effectively turns off the feature for this queue. We don't want
-        * one queue (VF) to interfering with another queue (another VF)
-        */
-       if (vf->cfg_flags & VF_CFG_FW_FC)
-               BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
-                         vf->abs_vfid);
        /* Setup-op flags:
         * collect statistics, zero statistics, local-switching, security,
         * OV for Flex10, RSS and MCAST for leading
@@ -358,22 +352,24 @@ static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
 }
 
 static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
-                                  int qid, bool drv_only, bool mac)
+                                  int qid, bool drv_only, int type)
 {
        struct bnx2x_vlan_mac_ramrod_params ramrod;
        int rc;
 
        DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
-          mac ? "MACs" : "VLANs");
+                         (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
+                         (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
 
        /* Prepare ramrod params */
        memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
-       if (mac) {
+       if (type == BNX2X_VF_FILTER_VLAN_MAC) {
+               set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+               ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
+       } else if (type == BNX2X_VF_FILTER_MAC) {
                set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
        } else {
-               set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
-                       &ramrod.user_req.vlan_mac_flags);
                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
        }
        ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
@@ -391,14 +387,11 @@ static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                             &ramrod.ramrod_flags);
        if (rc) {
                BNX2X_ERR("Failed to delete all %s\n",
-                         mac ? "MACs" : "VLANs");
+                         (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
+                         (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
                return rc;
        }
 
-       /* Clear the vlan counters */
-       if (!mac)
-               atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0);
-
        return 0;
 }
 
@@ -412,13 +405,17 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
 
        DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
           vf->abs_vfid, filter->add ? "Adding" : "Deleting",
-          filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN");
+          (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MAC" :
+          (filter->type == BNX2X_VF_FILTER_MAC) ? "MAC" : "VLAN");
 
        /* Prepare ramrod params */
        memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
-       if (filter->type == BNX2X_VF_FILTER_VLAN) {
-               set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
-                       &ramrod.user_req.vlan_mac_flags);
+       if (filter->type == BNX2X_VF_FILTER_VLAN_MAC) {
+               ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
+               ramrod.user_req.u.vlan_mac.vlan = filter->vid;
+               memcpy(&ramrod.user_req.u.vlan_mac.mac, filter->mac, ETH_ALEN);
+               set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+       } else if (filter->type == BNX2X_VF_FILTER_VLAN) {
                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
                ramrod.user_req.u.vlan.vlan = filter->vid;
        } else {
@@ -429,16 +426,6 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
        ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
                                            BNX2X_VLAN_MAC_DEL;
 
-       /* Verify there are available vlan credits */
-       if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
-           (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
-            vf_vlan_rules_cnt(vf))) {
-               BNX2X_ERR("No credits for vlan [%d >= %d]\n",
-                         atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
-                         vf_vlan_rules_cnt(vf));
-               return -ENOMEM;
-       }
-
        set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
        if (drv_only)
                set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
@@ -450,16 +437,13 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
        if (rc && rc != -EEXIST) {
                BNX2X_ERR("Failed to %s %s\n",
                          filter->add ? "add" : "delete",
-                         filter->type == BNX2X_VF_FILTER_MAC ? "MAC" :
-                                                               "VLAN");
+                         (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
+                               "VLAN-MAC" :
+                         (filter->type == BNX2X_VF_FILTER_MAC) ?
+                               "MAC" : "VLAN");
                return rc;
        }
 
-       /* Update the vlan counters */
-       if (filter->type == BNX2X_VF_FILTER_VLAN)
-               bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj,
-                                    &bnx2x_vfq(vf, qid, vlan_count));
-
        return 0;
 }
 
@@ -511,21 +495,7 @@ int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
        if (rc)
                goto op_err;
 
-       /* Configure vlan0 for leading queue */
-       if (!qid) {
-               struct bnx2x_vf_mac_vlan_filter filter;
-
-               memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
-               filter.type = BNX2X_VF_FILTER_VLAN;
-               filter.add = true;
-               filter.vid = 0;
-               rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
-               if (rc)
-                       goto op_err;
-       }
-
        /* Schedule the configuration of any pending vlan filters */
-       vf->cfg_flags |= VF_CFG_VLAN;
        bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
                               BNX2X_MSG_IOV);
        return 0;
@@ -544,10 +514,16 @@ static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
        /* If needed, clean the filtering data base */
        if ((qid == LEADING_IDX) &&
            bnx2x_validate_vf_sp_objs(bp, vf, false)) {
-               rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false);
+               rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+                                            BNX2X_VF_FILTER_VLAN_MAC);
+               if (rc)
+                       goto op_err;
+               rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+                                            BNX2X_VF_FILTER_VLAN);
                if (rc)
                        goto op_err;
-               rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true);
+               rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+                                            BNX2X_VF_FILTER_MAC);
                if (rc)
                        goto op_err;
        }
@@ -680,11 +656,18 @@ int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
                /* Remove filtering if feasible */
                if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
                        rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
-                                                    false, false);
+                                                    false,
+                                                    BNX2X_VF_FILTER_VLAN_MAC);
+                       if (rc)
+                               goto op_err;
+                       rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+                                                    false,
+                                                    BNX2X_VF_FILTER_VLAN);
                        if (rc)
                                goto op_err;
                        rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
-                                                    false, true);
+                                                    false,
+                                                    BNX2X_VF_FILTER_MAC);
                        if (rc)
                                goto op_err;
                        rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
@@ -765,8 +748,6 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
        val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
        val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
-       if (vf->cfg_flags & VF_CFG_INT_SIMD)
-               val |= IGU_VF_CONF_SINGLE_ISR_EN;
        val &= ~IGU_VF_CONF_PARENT_MASK;
        val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
        REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
@@ -845,29 +826,6 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
        return 0;
 }
 
-static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
-                                         struct bnx2x_virtf *vf,
-                                         int new)
-{
-       int num = vf_vlan_rules_cnt(vf);
-       int diff = new - num;
-       bool rc = true;
-
-       DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
-          vf->abs_vfid, new, num);
-
-       if (diff > 0)
-               rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
-       else if (diff < 0)
-               rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);
-
-       if (rc)
-               vf_vlan_rules_cnt(vf) = new;
-       else
-               DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
-                  vf->abs_vfid);
-}
-
 /* must be called after the number of PF queues and the number of VFs are
  * both known
  */
@@ -875,21 +833,13 @@ static void
 bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
        struct vf_pf_resc_request *resc = &vf->alloc_resc;
-       u16 vlan_count = 0;
 
        /* will be set only during VF-ACQUIRE */
        resc->num_rxqs = 0;
        resc->num_txqs = 0;
 
-       /* no credit calculations for macs (just yet) */
-       resc->num_mac_filters = 1;
-
-       /* divvy up vlan rules */
-       bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
-       vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
-       vlan_count = 1 << ilog2(vlan_count);
-       bnx2x_iov_re_set_vlan_filters(bp, vf,
-                                     vlan_count / BNX2X_NR_VIRTFN(bp));
+       resc->num_mac_filters = VF_MAC_CREDIT_CNT;
+       resc->num_vlan_filters = VF_VLAN_CREDIT_CNT;
 
        /* no real limitation */
        resc->num_mc_filters = 0;
@@ -1338,6 +1288,9 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
 
        mutex_init(&bp->vfdb->bulletin_mutex);
 
+       if (SHMEM2_HAS(bp, sriov_switch_mode))
+               SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB);
+
        return 0;
 failed:
        DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
@@ -1620,6 +1573,11 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
                vf->filter_state = 0;
                vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
 
+               bnx2x_init_credit_pool(&vf->vf_vlans_pool, 0,
+                                      vf_vlan_rules_cnt(vf));
+               bnx2x_init_credit_pool(&vf->vf_macs_pool, 0,
+                                      vf_mac_rules_cnt(vf));
+
                /*  init mcast object - This object will be re-initialized
                 *  during VF-ACQUIRE with the proper cl_id and cid.
                 *  It needs to be initialized here so that it can be safely
@@ -2032,12 +1990,11 @@ int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
        u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
        u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
 
-       /* Save a vlan filter for the Hypervisor */
        return ((req_resc->num_rxqs <= rxq_cnt) &&
                (req_resc->num_txqs <= txq_cnt) &&
                (req_resc->num_sbs <= vf_sb_count(vf))   &&
                (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
-               (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
+               (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
 }
 
 /* CORE VF API */
@@ -2091,16 +2048,12 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
        vf_sb_count(vf) = resc->num_sbs;
        vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
        vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
-       if (resc->num_mac_filters)
-               vf_mac_rules_cnt(vf) = resc->num_mac_filters;
-       /* Add an additional vlan filter credit for the hypervisor */
-       bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);
 
        DP(BNX2X_MSG_IOV,
           "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
           vf_sb_count(vf), vf_rxq_count(vf),
           vf_txq_count(vf), vf_mac_rules_cnt(vf),
-          vf_vlan_rules_visible_cnt(vf));
+          vf_vlan_rules_cnt(vf));
 
        /* Initialize the queues */
        if (!vf->vfqs) {
@@ -2133,7 +2086,6 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
 {
        struct bnx2x_func_init_params func_init = {0};
-       u16 flags = 0;
        int i;
 
        /* the sb resources are initialized at this point, do the
@@ -2160,23 +2112,9 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
        /* reset IGU VF statistics: MSIX */
        REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0);
 
-       /* vf init */
-       if (vf->cfg_flags & VF_CFG_STATS)
-               flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
-
-       if (vf->cfg_flags & VF_CFG_TPA)
-               flags |= FUNC_FLG_TPA;
-
-       if (is_vf_multi(vf))
-               flags |= FUNC_FLG_RSS;
-
        /* function setup */
-       func_init.func_flgs = flags;
        func_init.pf_id = BP_FUNC(bp);
        func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
-       func_init.fw_stat_map = vf->fw_stat_map;
-       func_init.spq_map = vf->spq_map;
-       func_init.spq_prod = 0;
        bnx2x_func_init(bp, &func_init);
 
        /* Enable the vf */
@@ -2589,8 +2527,8 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
 
        DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
        for_each_vf(bp, vfidx) {
-       bulletin = BP_VF_BULLETIN(bp, vfidx);
-               if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
+               bulletin = BP_VF_BULLETIN(bp, vfidx);
+               if (bulletin->valid_bitmap & (1 << VLAN_VALID))
                        bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
        }
 }
@@ -2808,20 +2746,58 @@ out:
        return rc;
 }
 
-int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
+static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp,
+                                        struct bnx2x_virtf *vf, bool accept)
+{
+       struct bnx2x_rx_mode_ramrod_params rx_ramrod;
+       unsigned long accept_flags;
+
+       /* need to remove/add the VF's accept_any_vlan bit */
+       accept_flags = bnx2x_leading_vfq(vf, accept_flags);
+       if (accept)
+               set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+       else
+               clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+       bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
+                             accept_flags);
+       bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
+       bnx2x_config_rx_mode(bp, &rx_ramrod);
+}
+
+static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
+                                   u16 vlan, bool add)
 {
-       struct bnx2x_queue_state_params q_params = {NULL};
        struct bnx2x_vlan_mac_ramrod_params ramrod_param;
-       struct bnx2x_queue_update_params *update_params;
+       unsigned long ramrod_flags = 0;
+       int rc = 0;
+
+       /* Configure the new vlan on the device */
+       memset(&ramrod_param, 0, sizeof(ramrod_param));
+       __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+       ramrod_param.vlan_mac_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+       ramrod_param.ramrod_flags = ramrod_flags;
+       ramrod_param.user_req.u.vlan.vlan = vlan;
+       ramrod_param.user_req.cmd = add ? BNX2X_VLAN_MAC_ADD
+                                       : BNX2X_VLAN_MAC_DEL;
+       rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+       if (rc) {
+               BNX2X_ERR("failed to configure vlan\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
+{
        struct pf_vf_bulletin_content *bulletin = NULL;
-       struct bnx2x_rx_mode_ramrod_params rx_ramrod;
        struct bnx2x *bp = netdev_priv(dev);
        struct bnx2x_vlan_mac_obj *vlan_obj;
        unsigned long vlan_mac_flags = 0;
        unsigned long ramrod_flags = 0;
        struct bnx2x_virtf *vf = NULL;
-       unsigned long accept_flags;
-       int rc;
+       int i, rc;
 
        if (vlan > 4095) {
                BNX2X_ERR("illegal vlan value %d\n", vlan);
@@ -2850,6 +2826,10 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
                bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
        bulletin->vlan = vlan;
 
+       /* Post update on VF's bulletin board */
+       rc = bnx2x_post_vf_bulletin(bp, vfidx);
+       if (rc)
+               BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
        mutex_unlock(&bp->vfdb->bulletin_mutex);
 
        /* is vf initialized and queue set up? */
@@ -2876,84 +2856,76 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
                goto out;
        }
 
-       /* need to remove/add the VF's accept_any_vlan bit */
-       accept_flags = bnx2x_leading_vfq(vf, accept_flags);
-       if (vlan)
-               clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
-       else
-               set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
-
-       bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
-                             accept_flags);
-       bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
-       bnx2x_config_rx_mode(bp, &rx_ramrod);
+       /* Clear accept_any_vlan when the HV forces a vlan; otherwise set
+        * it according to the VF's capabilities.
+        */
+       if (vlan || !(vf->cfg_flags & VF_CFG_VLAN_FILTER))
+               bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan);
 
-       /* configure the new vlan to device */
-       memset(&ramrod_param, 0, sizeof(ramrod_param));
-       __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
-       ramrod_param.vlan_mac_obj = vlan_obj;
-       ramrod_param.ramrod_flags = ramrod_flags;
-       set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
-               &ramrod_param.user_req.vlan_mac_flags);
-       ramrod_param.user_req.u.vlan.vlan = vlan;
-       ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
-       rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
-       if (rc) {
-               BNX2X_ERR("failed to configure vlan\n");
-               rc =  -EINVAL;
+       rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true);
+       if (rc)
                goto out;
-       }
 
-       /* send queue update ramrod to configure default vlan and silent
-        * vlan removal
+       /* send queue update ramrods to configure default vlan and
+        * silent vlan removal
         */
-       __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
-       q_params.cmd = BNX2X_Q_CMD_UPDATE;
-       q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
-       update_params = &q_params.params.update;
-       __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
-                 &update_params->update_flags);
-       __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
-                 &update_params->update_flags);
-       if (vlan == 0) {
-               /* if vlan is 0 then we want to leave the VF traffic
-                * untagged, and leave the incoming traffic untouched
-                * (i.e. do not remove any vlan tags).
-                */
-               __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
-                           &update_params->update_flags);
-               __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
-                           &update_params->update_flags);
-       } else {
-               /* configure default vlan to vf queue and set silent
-                * vlan removal (the vf remains unaware of this vlan).
-                */
-               __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+       for_each_vfq(vf, i) {
+               struct bnx2x_queue_state_params q_params = {NULL};
+               struct bnx2x_queue_update_params *update_params;
+
+               q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);
+
+               /* validate the Q is UP */
+               if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
+                   BNX2X_Q_LOGICAL_STATE_ACTIVE)
+                       continue;
+
+               __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+               q_params.cmd = BNX2X_Q_CMD_UPDATE;
+               update_params = &q_params.params.update;
+               __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
                          &update_params->update_flags);
-               __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+               __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
                          &update_params->update_flags);
-               update_params->def_vlan = vlan;
-               update_params->silent_removal_value =
-                       vlan & VLAN_VID_MASK;
-               update_params->silent_removal_mask = VLAN_VID_MASK;
-       }
+               if (vlan == 0) {
+                       /* if vlan is 0 then we want to leave the VF traffic
+                        * untagged, and leave the incoming traffic untouched
+                        * (i.e. do not remove any vlan tags).
+                        */
+                       __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+                                   &update_params->update_flags);
+                       __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+                                   &update_params->update_flags);
+               } else {
+                       /* configure default vlan to vf queue and set silent
+                        * vlan removal (the vf remains unaware of this vlan).
+                        */
+                       __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+                                 &update_params->update_flags);
+                       __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+                                 &update_params->update_flags);
+                       update_params->def_vlan = vlan;
+                       update_params->silent_removal_value =
+                               vlan & VLAN_VID_MASK;
+                       update_params->silent_removal_mask = VLAN_VID_MASK;
+               }
 
-       /* Update the Queue state */
-       rc = bnx2x_queue_state_change(bp, &q_params);
-       if (rc) {
-               BNX2X_ERR("Failed to configure default VLAN\n");
-               goto out;
+               /* Update the Queue state */
+               rc = bnx2x_queue_state_change(bp, &q_params);
+               if (rc) {
+                       BNX2X_ERR("Failed to configure default VLAN queue %d\n",
+                                 i);
+                       goto out;
+               }
        }
-
-
-       /* clear the flag indicating that this VF needs its vlan
-        * (will only be set if the HV configured the Vlan before vf was
-        * up and we were called because the VF came up later
-        */
 out:
-       vf->cfg_flags &= ~VF_CFG_VLAN;
        bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
 
+       if (!rc)
+               DP(BNX2X_MSG_IOV,
+                  "updated VF[%d] vlan configuration (vlan = %d)\n",
+                  vfidx, vlan);
+
        return rc;
 }
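For context on how bnx2x_set_vf_vlan() is reached: bnx2x registers it as the ndo_set_vf_vlan callback (the ops-table excerpt below is a sketch, with unrelated callbacks elided), so a hypervisor-side `ip link set <dev> vf <n> vlan <vid>` lands here via the rtnetlink core.

static const struct net_device_ops bnx2x_netdev_ops = {
        /* ... other callbacks elided ... */
        .ndo_set_vf_vlan        = bnx2x_set_vf_vlan,
};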
 
index 66ee62a0401a86c9e44db1afdb9ebac0b7ebf49e..670a581ffabc7ee64b01e15170fac9bd194e5186 100644 (file)
@@ -1,15 +1,17 @@
-/* bnx2x_sriov.h: Broadcom Everest network driver.
+/* bnx2x_sriov.h: QLogic Everest network driver.
  *
  * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -75,7 +77,10 @@ struct bnx2x_vf_queue {
 
        /* VLANs object */
        struct bnx2x_vlan_mac_obj       vlan_obj;
-       atomic_t vlan_count;            /* 0 means vlan-0 is set  ~ untagged */
+
+       /* VLAN-MACs object */
+       struct bnx2x_vlan_mac_obj       vlan_mac_obj;
+
        unsigned long accept_flags;     /* last accept flags configured */
 
        /* Queue Slow-path State object */
@@ -103,8 +108,10 @@ struct bnx2x_virtf;
 
 struct bnx2x_vf_mac_vlan_filter {
        int type;
-#define BNX2X_VF_FILTER_MAC    1
-#define BNX2X_VF_FILTER_VLAN   2
+#define BNX2X_VF_FILTER_MAC    BIT(0)
+#define BNX2X_VF_FILTER_VLAN   BIT(1)
+#define BNX2X_VF_FILTER_VLAN_MAC \
+       (BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/
 
        bool add;
        u8 *mac;
@@ -119,14 +126,9 @@ struct bnx2x_vf_mac_vlan_filters {
 /* vf context */
 struct bnx2x_virtf {
        u16 cfg_flags;
-#define VF_CFG_STATS           0x0001
-#define VF_CFG_FW_FC           0x0002
-#define VF_CFG_TPA             0x0004
-#define VF_CFG_INT_SIMD                0x0008
-#define VF_CACHE_LINE          0x0010
-#define VF_CFG_VLAN            0x0020
-#define VF_CFG_STATS_COALESCE  0x0040
-#define VF_CFG_EXT_BULLETIN    0x0080
+#define VF_CFG_STATS_COALESCE  0x1
+#define VF_CFG_EXT_BULLETIN    0x2
+#define VF_CFG_VLAN_FILTER     0x4
        u8 link_cfg;            /* IFLA_VF_LINK_STATE_AUTO
                                 * IFLA_VF_LINK_STATE_ENABLE
                                 * IFLA_VF_LINK_STATE_DISABLE
@@ -140,9 +142,8 @@ struct bnx2x_virtf {
        bool flr_clnup_stage;   /* true during flr cleanup */
 
        /* dma */
-       dma_addr_t fw_stat_map;         /* valid iff VF_CFG_STATS */
+       dma_addr_t fw_stat_map;
        u16 stats_stride;
-       dma_addr_t spq_map;
        dma_addr_t bulletin_map;
 
        /* Allocated resources counters. Before the VF is acquired, the
@@ -163,8 +164,6 @@ struct bnx2x_virtf {
 #define vf_mac_rules_cnt(vf)           ((vf)->alloc_resc.num_mac_filters)
 #define vf_vlan_rules_cnt(vf)          ((vf)->alloc_resc.num_vlan_filters)
 #define vf_mc_rules_cnt(vf)            ((vf)->alloc_resc.num_mc_filters)
-       /* Hide a single vlan filter credit for the hypervisor */
-#define vf_vlan_rules_visible_cnt(vf)  (vf_vlan_rules_cnt(vf) - 1)
 
        u8 sb_count;    /* actual number of SBs */
        u8 igu_base_id; /* base igu status block id */
@@ -207,6 +206,9 @@ struct bnx2x_virtf {
        enum channel_tlvs               op_current;
 
        u8 fp_hsi;
+
+       struct bnx2x_credit_pool_obj    vf_vlans_pool;
+       struct bnx2x_credit_pool_obj    vf_macs_pool;
 };
 
 #define BNX2X_NR_VIRTFN(bp)    ((bp)->vfdb->sriov.nr_virtfn)
@@ -230,6 +232,12 @@ struct bnx2x_virtf {
 #define FW_VF_HANDLE(abs_vfid) \
        (abs_vfid + FW_PF_MAX_HANDLE)
 
+#define GET_NUM_VFS_PER_PATH(bp)       64 /* use max possible value */
+#define GET_NUM_VFS_PER_PF(bp)         ((bp)->vfdb ? (bp)->vfdb->sriov.total \
+                                                   : 0)
+#define VF_MAC_CREDIT_CNT              1
+#define VF_VLAN_CREDIT_CNT             2 /* VLAN0 + 'real' VLAN */
+
 /* locking and unlocking the channel mutex */
 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
                              enum channel_tlvs tlv);
@@ -273,6 +281,10 @@ struct bnx2x_vf_sp {
                struct eth_classify_rules_ramrod_data   e2;
        } vlan_rdata;
 
+       union {
+               struct eth_classify_rules_ramrod_data   e2;
+       } vlan_mac_rdata;
+
        union {
                struct eth_filter_rules_ramrod_data     e2;
        } rx_mode_rdata;
@@ -536,8 +548,14 @@ int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx);
 
 int bnx2x_set_vf_link_state(struct net_device *dev, int vf, int link_state);
 
+int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add);
 #else /* CONFIG_BNX2X_SRIOV */
 
+#define GET_NUM_VFS_PER_PATH(bp)       0
+#define GET_NUM_VFS_PER_PF(bp)         0
+#define VF_MAC_CREDIT_CNT              0
+#define VF_VLAN_CREDIT_CNT             0
+
 static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
                                struct bnx2x_queue_sp_obj **q_obj) {}
 static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
@@ -604,5 +622,7 @@ struct pf_vf_bulletin_content;
 static inline void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
                                              bool support_long) {}
 
+static inline int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) { return 0; }
+
 #endif /* CONFIG_BNX2X_SRIOV */
 #endif /* bnx2x_sriov.h */
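
The new credit-count macros bound each VF's classification resources: one MAC
credit and two VLAN credits (VLAN 0 plus one "real" VLAN), for up to 64 VFs
per path. A sizing sketch under those assumptions (example_vlan_pool_size is
illustrative, not a driver function):

    /* Illustrative only: 64 VFs * 2 credits = 128 pool entries per path. */
    static int example_vlan_pool_size(struct bnx2x *bp)
    {
            return GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT;
    }
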
index 69d699f0730a3bd4d8980607e0a36cd8da461f1e..7e0919aa450e754d444bc7fb54ec4d2375fabcb8 100644 (file)
@@ -1,6 +1,8 @@
-/* bnx2x_stats.c: Broadcom Everest network driver.
+/* bnx2x_stats.c: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 965539a9dabe7e4702e1ba6b382aefc69fb26fac..b2644ed13d064eacc3b34cf59d48b156bedac16b 100644 (file)
@@ -1,6 +1,8 @@
-/* bnx2x_stats.h: Broadcom Everest network driver.
+/* bnx2x_stats.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 06b8c0d8fd3b12ab4e864c8c0971cc52380c007c..1374e5394a7970ba20ad54ddfc6271ce09e40744 100644 (file)
@@ -1,15 +1,17 @@
-/* bnx2x_vfpf.c: Broadcom Everest network driver.
+/* bnx2x_vfpf.c: QLogic Everest network driver.
  *
  * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -245,6 +247,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
        req->resc_request.num_sbs = bp->igu_sb_cnt;
        req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
        req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
+       req->resc_request.num_vlan_filters = VF_ACQUIRE_VLAN_FILTERS;
 
        /* pf 2 vf bulletin board address */
        req->bulletin_addr = bp->pf2vf_bulletin_mapping;
@@ -255,6 +258,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
 
        /* Bulletin support for bulletin board with length > legacy length */
        req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN;
+       /* vlan filtering is supported */
+       req->vfdev_info.caps |= VF_CAP_SUPPORT_VLAN_FILTER;
 
        /* add list termination tlv */
        bnx2x_add_tlv(bp, req,
@@ -373,6 +378,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
                NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
        bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
        bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
+       bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;
+
        strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
                sizeof(bp->fw_ver));
 
@@ -546,7 +553,7 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
                           BNX2X_FILTER_MAC_PENDING,
                           &vf->filter_state,
                           BNX2X_OBJ_TYPE_RX_TX,
-                          &bp->macs_pool);
+                          &vf->vf_macs_pool);
        /* vlan */
        bnx2x_init_vlan_obj(bp, &q->vlan_obj,
                            cl_id, q->cid, func_id,
@@ -555,8 +562,17 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
                            BNX2X_FILTER_VLAN_PENDING,
                            &vf->filter_state,
                            BNX2X_OBJ_TYPE_RX_TX,
-                           &bp->vlans_pool);
-
+                           &vf->vf_vlans_pool);
+       /* vlan-mac */
+       bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj,
+                               cl_id, q->cid, func_id,
+                               bnx2x_vf_sp(bp, vf, vlan_mac_rdata),
+                               bnx2x_vf_sp_map(bp, vf, vlan_mac_rdata),
+                               BNX2X_FILTER_VLAN_MAC_PENDING,
+                               &vf->filter_state,
+                               BNX2X_OBJ_TYPE_RX_TX,
+                               &vf->vf_macs_pool,
+                               &vf->vf_vlans_pool);
        /* mcast */
        bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
                             q->cid, func_id, func_id,
@@ -723,7 +739,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
 
        req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
        if (set)
-               req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC;
+               req->filters[0].flags |= VFPF_Q_FILTER_SET;
 
        /* sample bulletin board for new mac */
        bnx2x_sample_bulletin(bp);
@@ -911,6 +927,67 @@ out:
        return 0;
 }
 
+/* request pf to add or remove a vlan for the vf */
+int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
+{
+       struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+       struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+       int rc = 0;
+
+       if (!(bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER)) {
+               DP(BNX2X_MSG_IOV, "HV does not support vlan filtering\n");
+               return 0;
+       }
+
+       /* clear mailbox and prep first tlv */
+       bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+                       sizeof(*req));
+
+       req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
+       req->vf_qid = vf_qid;
+       req->n_mac_vlan_filters = 1;
+
+       req->filters[0].flags = VFPF_Q_FILTER_VLAN_TAG_VALID;
+
+       if (add)
+               req->filters[0].flags |= VFPF_Q_FILTER_SET;
+
+       /* sample bulletin board for hypervisor vlan */
+       bnx2x_sample_bulletin(bp);
+
+       if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
+               BNX2X_ERR("Hypervisor will dicline the request, avoiding\n");
+               rc = -EINVAL;
+               goto out;
+       }
+
+       req->filters[0].vlan_tag = vid;
+
+       /* add list termination tlv */
+       bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       /* output tlvs list */
+       bnx2x_dp_tlv_list(bp, req);
+
+       /* send message to pf */
+       rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+       if (rc) {
+               BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+               goto out;
+       }
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+               BNX2X_ERR("vfpf %s VLAN %d failed\n", add ? "add" : "del",
+                         vid);
+               rc = -EINVAL;
+       }
+out:
+       bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+       return rc;
+}
+
 int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
 {
        int mode = bp->rx_mode;
@@ -934,8 +1011,13 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
                req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
                req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
                req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+               if (mode == BNX2X_RX_MODE_PROMISC)
+                       req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
        }
 
+       if (bp->accept_any_vlan)
+               req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
+
        req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
        req->vf_qid = 0;
 
@@ -1188,7 +1270,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
        resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
        resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
                                   PFVF_CAP_TPA |
-                                  PFVF_CAP_TPA_UPDATE);
+                                  PFVF_CAP_TPA_UPDATE |
+                                  PFVF_CAP_VLAN_FILTER);
        bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
                          sizeof(resp->pfdev_info.fw_ver));
 
@@ -1203,7 +1286,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
                        bnx2x_vf_max_queue_cnt(bp, vf);
                resc->num_sbs = vf_sb_count(vf);
                resc->num_mac_filters = vf_mac_rules_cnt(vf);
-               resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf);
+               resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
                resc->num_mc_filters = 0;
 
                if (status == PFVF_STATUS_SUCCESS) {
@@ -1370,6 +1453,14 @@ static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
                vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN;
        }
 
+       if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_VLAN_FILTER) {
+               DP(BNX2X_MSG_IOV, "VF[%d] supports vlan filtering\n",
+                  vf->abs_vfid);
+               vf->cfg_flags |= VF_CFG_VLAN_FILTER;
+       } else {
+               vf->cfg_flags &= ~VF_CFG_VLAN_FILTER;
+       }
+
 out:
        /* response */
        bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
@@ -1382,7 +1473,6 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
        int rc;
 
        /* record ghost addresses from vf message */
-       vf->spq_map = init->spq_addr;
        vf->fw_stat_map = init->stats_addr;
        vf->stats_stride = init->stats_stride;
        rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
@@ -1578,17 +1668,18 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
 
                if ((msg_filter->flags & type_flag) != type_flag)
                        continue;
-               if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
+               memset(&fl->filters[j], 0, sizeof(fl->filters[j]));
+               if (type_flag & VFPF_Q_FILTER_DEST_MAC_VALID) {
                        fl->filters[j].mac = msg_filter->mac;
-                       fl->filters[j].type = BNX2X_VF_FILTER_MAC;
-               } else {
+                       fl->filters[j].type |= BNX2X_VF_FILTER_MAC;
+               }
+               if (type_flag & VFPF_Q_FILTER_VLAN_TAG_VALID) {
                        fl->filters[j].vid = msg_filter->vlan_tag;
-                       fl->filters[j].type = BNX2X_VF_FILTER_VLAN;
+                       fl->filters[j].type |= BNX2X_VF_FILTER_VLAN;
                }
-               fl->filters[j].add =
-                       (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
-                       true : false;
+               fl->filters[j].add = !!(msg_filter->flags & VFPF_Q_FILTER_SET);
                fl->count++;
+               j++;
        }
        if (!fl->count)
                kfree(fl);
@@ -1598,6 +1689,18 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
        return 0;
 }
 
+static int bnx2x_vf_filters_contain(struct vfpf_set_q_filters_tlv *filters,
+                                   u32 flags)
+{
+       int i, cnt = 0;
+
+       for (i = 0; i < filters->n_mac_vlan_filters; i++)
+               if  ((filters->filters[i].flags & flags) == flags)
+                       cnt++;
+
+       return cnt;
+}
+
 static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
                                       struct vfpf_q_mac_vlan_filter *filter)
 {
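
bnx2x_vf_filters_contain() counts the request entries whose flags include a
given mask; the vlan-validation hunk further below uses it to reject guest
VLAN configuration once the hypervisor has forced one. A small usage sketch
(example_count_macs is illustrative, not a driver symbol):

    /* Illustrative only: entries carrying a valid MAC classification. */
    static int example_count_macs(struct vfpf_set_q_filters_tlv *filters)
    {
            return bnx2x_vf_filters_contain(filters,
                                            VFPF_Q_FILTER_DEST_MAC_VALID);
    }
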
@@ -1629,6 +1732,7 @@ static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
 
 #define VFPF_MAC_FILTER                VFPF_Q_FILTER_DEST_MAC_VALID
 #define VFPF_VLAN_FILTER       VFPF_Q_FILTER_VLAN_TAG_VALID
+#define VFPF_VLAN_MAC_FILTER   (VFPF_VLAN_FILTER | VFPF_MAC_FILTER)
 
 static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
@@ -1639,17 +1743,17 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
        /* check for any mac/vlan changes */
        if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
-               /* build mac list */
                struct bnx2x_vf_mac_vlan_filters *fl = NULL;
 
+               /* build vlan-mac list */
                rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
-                                              VFPF_MAC_FILTER);
+                                              VFPF_VLAN_MAC_FILTER);
                if (rc)
                        goto op_err;
 
                if (fl) {
 
-                       /* set mac list */
+                       /* set vlan-mac list */
                        rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
                                                           msg->vf_qid,
                                                           false);
@@ -1657,22 +1761,23 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
                                goto op_err;
                }
 
-               /* build vlan list */
+               /* build mac list */
                fl = NULL;
 
                rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
-                                              VFPF_VLAN_FILTER);
+                                              VFPF_MAC_FILTER);
                if (rc)
                        goto op_err;
 
                if (fl) {
-                       /* set vlan list */
+                       /* set mac list */
                        rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
                                                           msg->vf_qid,
                                                           false);
                        if (rc)
                                goto op_err;
                }
+
        }
 
        if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
@@ -1687,11 +1792,15 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
                        __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
                }
 
-               /* A packet arriving the vf's mac should be accepted
-                * with any vlan, unless a vlan has already been
-                * configured.
+               /* any_vlan is not configured if the HV is forcing a VLAN;
+                * otherwise any_vlan is configured if
+                *   1. VF does not support vlan filtering
+                *   OR
+                *   2. VF supports vlan filtering and explicitly requested it
                 */
-               if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
+               if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)) &&
+                   (!(vf->cfg_flags & VF_CFG_VLAN_FILTER) ||
+                    msg->rx_mask & VFPF_RX_MASK_ACCEPT_ANY_VLAN))
                        __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
 
                /* set rx-mode */
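
Restated as a pure predicate, the accept-any-vlan rule in the comment above
reads as follows (parameter names are illustrative, not driver symbols):

    /* Illustrative restatement of the rule above. */
    static bool example_accept_any_vlan(bool hv_forced_vlan,
                                        bool vf_has_vlan_filtering,
                                        bool vf_requested_any_vlan)
    {
            return !hv_forced_vlan &&
                   (!vf_has_vlan_filtering || vf_requested_any_vlan);
    }
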
@@ -1727,17 +1836,31 @@ static int bnx2x_filters_validate_mac(struct bnx2x *bp,
         * since queue was not set up.
         */
        if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
-               /* once a mac was set by ndo can only accept a single mac... */
-               if (filters->n_mac_vlan_filters > 1) {
-                       BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
-                                 vf->abs_vfid);
-                       rc = -EPERM;
-                       goto response;
+               struct vfpf_q_mac_vlan_filter *filter = NULL;
+               int i;
+
+               for (i = 0; i < filters->n_mac_vlan_filters; i++) {
+                       if (!(filters->filters[i].flags &
+                             VFPF_Q_FILTER_DEST_MAC_VALID))
+                               continue;
+
+                       /* once a mac was set by ndo can only accept
+                        * a single mac...
+                        */
+                       if (filter) {
+                               BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called [%d filters]\n",
+                                         vf->abs_vfid,
+                                         filters->n_mac_vlan_filters);
+                               rc = -EPERM;
+                               goto response;
+                       }
+
+                       filter = &filters->filters[i];
                }
 
                /* ...and only the mac set by the ndo */
-               if (filters->n_mac_vlan_filters == 1 &&
-                   !ether_addr_equal(filters->filters->mac, bulletin->mac)) {
+               if (filter &&
+                   !ether_addr_equal(filter->mac, bulletin->mac)) {
                        BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
                                  vf->abs_vfid);
 
@@ -1759,17 +1882,14 @@ static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
 
        /* if vlan was set by hypervisor we don't allow guest to config vlan */
        if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
-               int i;
-
                /* search for vlan filters */
-               for (i = 0; i < filters->n_mac_vlan_filters; i++) {
-                       if (filters->filters[i].flags &
-                           VFPF_Q_FILTER_VLAN_TAG_VALID) {
-                               BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
-                                         vf->abs_vfid);
-                               rc = -EPERM;
-                               goto response;
-                       }
+
+               if (bnx2x_vf_filters_contain(filters,
+                                            VFPF_Q_FILTER_VLAN_TAG_VALID)) {
+                       BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
+                                 vf->abs_vfid);
+                       rc = -EPERM;
+                       goto response;
                }
        }
 
index b86479fc0d2f80adc9a066da4e26ee9b50d80bc9..64f2b52c58293964ad55a77caa47990312d48df6 100644 (file)
@@ -1,16 +1,22 @@
-/* bnx2x_vfpf.h: Broadcom Everest network driver.
+/* bnx2x_vfpf.h: QLogic Everest network driver.
  *
  * Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.gnu.org/licenses/gpl-2.0.html, with the following
+ * added to such license:
  *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
- * consent.
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions
+ * of the license of that module.  An independent module is a module which is
+ * not derived from this software.  The special exception does not apply to any
+ * modifications of the software.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Ariel Elior <ariel.elior@qlogic.com>
@@ -64,6 +70,8 @@ struct hw_sb_info {
 #define VFPF_RX_MASK_ACCEPT_ALL_UNICAST                0x00000004
 #define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST      0x00000008
 #define VFPF_RX_MASK_ACCEPT_BROADCAST          0x00000010
+#define VFPF_RX_MASK_ACCEPT_ANY_VLAN           0x00000020
+
 #define BULLETIN_CONTENT_SIZE          (sizeof(struct pf_vf_bulletin_content))
 #define BULLETIN_CONTENT_LEGACY_SIZE   (32)
#define BULLETIN_ATTEMPTS      5 /* crc failures before throwing in the towel */
@@ -127,6 +135,7 @@ struct vfpf_acquire_tlv {
                u8 fp_hsi_ver;
                u8 caps;
 #define VF_CAP_SUPPORT_EXT_BULLETIN    (1 << 0)
+#define VF_CAP_SUPPORT_VLAN_FILTER     (1 << 1)
        } vfdev_info;
 
        struct vf_pf_resc_request resc_request;
@@ -168,10 +177,12 @@ struct pfvf_acquire_resp_tlv {
        struct pf_vf_pfdev_info {
                u32 chip_num;
                u32 pf_cap;
-#define PFVF_CAP_RSS           0x00000001
-#define PFVF_CAP_DHC           0x00000002
-#define PFVF_CAP_TPA           0x00000004
-#define PFVF_CAP_TPA_UPDATE    0x00000008
+#define PFVF_CAP_RSS          0x00000001
+#define PFVF_CAP_DHC          0x00000002
+#define PFVF_CAP_TPA          0x00000004
+#define PFVF_CAP_TPA_UPDATE   0x00000008
+#define PFVF_CAP_VLAN_FILTER  0x00000010
+
                char fw_ver[32];
                u16 db_size;
                u8  indices_per_sb;
@@ -288,7 +299,7 @@ struct vfpf_q_mac_vlan_filter {
        u32 flags;
 #define VFPF_Q_FILTER_DEST_MAC_VALID   0x01
 #define VFPF_Q_FILTER_VLAN_TAG_VALID   0x02
-#define VFPF_Q_FILTER_SET_MAC          0x100   /* set/clear */
+#define VFPF_Q_FILTER_SET              0x100   /* set/clear */
        u8  mac[ETH_ALEN];
        u16 vlan_tag;
 };
index 64c1e9db6b0b5420687c1c083cc20b8adb7de5bd..eb080ef8ee97fec11fe3c03eee9f9ce8cac8eeb7 100644 (file)
@@ -907,9 +907,8 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
        }
 
        bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-
        if (mode == GENET_POWER_PASSIVE)
-               bcmgenet_mii_reset(priv->dev);
+               bcmgenet_phy_power_set(priv->dev, true);
 }
 
 /* ioctl handle special commands that are not present in ethtool. */
@@ -1725,7 +1724,7 @@ static int init_umac(struct bcmgenet_priv *priv)
        int0_enable |= UMAC_IRQ_TXDMA_DONE;
 
        /* Monitor cable plug/unplugged event for internal PHY */
-       if (phy_is_internal(priv->phydev)) {
+       if (priv->internal_phy) {
                int0_enable |= UMAC_IRQ_LINK_EVENT;
        } else if (priv->ext_phy) {
                int0_enable |= UMAC_IRQ_LINK_EVENT;
@@ -2389,6 +2388,23 @@ static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bcmgenet_poll_controller(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+
+       /* Invoke the main RX/TX interrupt handler */
+       disable_irq(priv->irq0);
+       bcmgenet_isr0(priv->irq0, priv);
+       enable_irq(priv->irq0);
+
+       /* And the interrupt handler for RX/TX priority queues */
+       disable_irq(priv->irq1);
+       bcmgenet_isr1(priv->irq1, priv);
+       enable_irq(priv->irq1);
+}
+#endif
+
 static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
 {
        u32 reg;
@@ -2626,13 +2642,12 @@ static int bcmgenet_open(struct net_device *dev)
        netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
 
        /* Turn on the clock */
-       if (!IS_ERR(priv->clk))
-               clk_prepare_enable(priv->clk);
+       clk_prepare_enable(priv->clk);
 
        /* If this is an internal GPHY, power it back on now, before UniMAC is
         * brought out of reset as absolutely no UniMAC activity is allowed
         */
-       if (phy_is_internal(priv->phydev))
+       if (priv->internal_phy)
                bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
 
        /* take MAC out of reset */
@@ -2651,7 +2666,7 @@ static int bcmgenet_open(struct net_device *dev)
 
        bcmgenet_set_hw_addr(priv, dev->dev_addr);
 
-       if (phy_is_internal(priv->phydev)) {
+       if (priv->internal_phy) {
                reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
                reg |= EXT_ENERGY_DET_MASK;
                bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
@@ -2687,23 +2702,24 @@ static int bcmgenet_open(struct net_device *dev)
                goto err_irq0;
        }
 
-       /* Re-configure the port multiplexer towards the PHY device */
-       bcmgenet_mii_config(priv->dev, false);
-
-       phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
-                          priv->phy_interface);
+       ret = bcmgenet_mii_probe(dev);
+       if (ret) {
+               netdev_err(dev, "failed to connect to PHY\n");
+               goto err_irq1;
+       }
 
        bcmgenet_netif_start(dev);
 
        return 0;
 
+err_irq1:
+       free_irq(priv->irq1, priv);
 err_irq0:
-       free_irq(priv->irq0, dev);
+       free_irq(priv->irq0, priv);
 err_fini_dma:
        bcmgenet_fini_dma(priv);
 err_clk_disable:
-       if (!IS_ERR(priv->clk))
-               clk_disable_unprepare(priv->clk);
+       clk_disable_unprepare(priv->clk);
        return ret;
 }
 
@@ -2757,11 +2773,10 @@ static int bcmgenet_close(struct net_device *dev)
        free_irq(priv->irq0, priv);
        free_irq(priv->irq1, priv);
 
-       if (phy_is_internal(priv->phydev))
+       if (priv->internal_phy)
                ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
 
-       if (!IS_ERR(priv->clk))
-               clk_disable_unprepare(priv->clk);
+       clk_disable_unprepare(priv->clk);
 
        return ret;
 }
@@ -2941,6 +2956,9 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
        .ndo_set_mac_address    = bcmgenet_set_mac_addr,
        .ndo_do_ioctl           = bcmgenet_ioctl,
        .ndo_set_features       = bcmgenet_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = bcmgenet_poll_controller,
+#endif
 };
 
 /* Array of GENET hardware parameters/characteristics */
@@ -3214,11 +3232,12 @@ static int bcmgenet_probe(struct platform_device *pdev)
                priv->version = pd->genet_version;
 
        priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
-       if (IS_ERR(priv->clk))
+       if (IS_ERR(priv->clk)) {
                dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
+               priv->clk = NULL;
+       }
 
-       if (!IS_ERR(priv->clk))
-               clk_prepare_enable(priv->clk);
+       clk_prepare_enable(priv->clk);
 
        bcmgenet_set_hw_params(priv);
 
@@ -3229,8 +3248,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
        INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
 
        priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
-       if (IS_ERR(priv->clk_wol))
+       if (IS_ERR(priv->clk_wol)) {
                dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
+               priv->clk_wol = NULL;
+       }
 
        priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
        if (IS_ERR(priv->clk_eee)) {
@@ -3256,8 +3277,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
        netif_carrier_off(dev);
 
        /* Turn off the main clock, WOL clock is handled separately */
-       if (!IS_ERR(priv->clk))
-               clk_disable_unprepare(priv->clk);
+       clk_disable_unprepare(priv->clk);
 
        err = register_netdev(dev);
        if (err)
@@ -3266,8 +3286,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
        return err;
 
 err_clk_disable:
-       if (!IS_ERR(priv->clk))
-               clk_disable_unprepare(priv->clk);
+       clk_disable_unprepare(priv->clk);
 err:
        free_netdev(dev);
        return err;
@@ -3319,7 +3338,7 @@ static int bcmgenet_suspend(struct device *d)
        if (device_may_wakeup(d) && priv->wolopts) {
                ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
                clk_prepare_enable(priv->clk_wol);
-       } else if (phy_is_internal(priv->phydev)) {
+       } else if (priv->internal_phy) {
                ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
        }
 
@@ -3348,7 +3367,7 @@ static int bcmgenet_resume(struct device *d)
        /* If this is an internal GPHY, power it back on now, before UniMAC is
         * brought out of reset as absolutely no UniMAC activity is allowed
         */
-       if (phy_is_internal(priv->phydev))
+       if (priv->internal_phy)
                bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
 
        bcmgenet_umac_reset(priv);
@@ -3363,14 +3382,14 @@ static int bcmgenet_resume(struct device *d)
 
        phy_init_hw(priv->phydev);
        /* Speed settings must be restored */
-       bcmgenet_mii_config(priv->dev, false);
+       bcmgenet_mii_config(priv->dev);
 
        /* disable ethernet MAC while updating its registers */
        umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
 
        bcmgenet_set_hw_addr(priv, dev->dev_addr);
 
-       if (phy_is_internal(priv->phydev)) {
+       if (priv->internal_phy) {
                reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
                reg |= EXT_ENERGY_DET_MASK;
                bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
index 6159deab8c9850a0231ef3b3f1fad6dfaa31a588..7299d10754226680e71ace26cbe4f996c8867127 100644 (file)
@@ -593,6 +593,7 @@ struct bcmgenet_priv {
        /* MDIO bus variables */
        wait_queue_head_t wq;
        struct phy_device *phydev;
+       bool internal_phy;
        struct device_node *phy_dn;
        struct device_node *mdio_dn;
        struct mii_bus *mii_bus;
@@ -670,9 +671,9 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
 
 /* MDIO routines */
 int bcmgenet_mii_init(struct net_device *dev);
-int bcmgenet_mii_config(struct net_device *dev, bool init);
+int bcmgenet_mii_config(struct net_device *dev);
+int bcmgenet_mii_probe(struct net_device *dev);
 void bcmgenet_mii_exit(struct net_device *dev);
-void bcmgenet_mii_reset(struct net_device *dev);
 void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
 void bcmgenet_mii_setup(struct net_device *dev);
 
index adf23d2ac4888e89f63c4246e7c3b33eaf3d0fd0..b3679ad1c1c73a62bc2e4d30e39e7505cc39c48e 100644 (file)
@@ -163,14 +163,13 @@ void bcmgenet_mii_setup(struct net_device *dev)
        phy_print_status(phydev);
 }
 
-void bcmgenet_mii_reset(struct net_device *dev)
+static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
+                                         struct fixed_phy_status *status)
 {
-       struct bcmgenet_priv *priv = netdev_priv(dev);
+       if (dev && dev->phydev && status)
+               status->link = dev->phydev->link;
 
-       if (priv->phydev) {
-               phy_init_hw(priv->phydev);
-               phy_start_aneg(priv->phydev);
-       }
+       return 0;
 }
 
 void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
@@ -215,7 +214,6 @@ static void bcmgenet_internal_phy_setup(struct net_device *dev)
        reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
        reg |= EXT_PWR_DN_EN_LD;
        bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-       bcmgenet_mii_reset(dev);
 }
 
 static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
@@ -226,9 +224,13 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
        reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
        reg |= LED_ACT_SOURCE_MAC;
        bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+
+       if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+               fixed_phy_set_link_update(priv->phydev,
+                                         bcmgenet_fixed_phy_link_update);
 }
 
-int bcmgenet_mii_config(struct net_device *dev, bool init)
+int bcmgenet_mii_config(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = priv->phydev;
@@ -238,10 +240,10 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
        u32 port_ctrl;
        u32 reg;
 
-       priv->ext_phy = !phy_is_internal(priv->phydev) &&
+       priv->ext_phy = !priv->internal_phy &&
                        (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
 
-       if (phy_is_internal(priv->phydev))
+       if (priv->internal_phy)
                priv->phy_interface = PHY_INTERFACE_MODE_NA;
 
        switch (priv->phy_interface) {
@@ -259,7 +261,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
 
                bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
 
-               if (phy_is_internal(priv->phydev)) {
+               if (priv->internal_phy) {
                        phy_name = "internal PHY";
                        bcmgenet_internal_phy_setup(dev);
                } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
@@ -321,13 +323,12 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
                bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
        }
 
-       if (init)
-               dev_info(kdev, "configuring instance for %s\n", phy_name);
+       dev_info_once(kdev, "configuring instance for %s\n", phy_name);
 
        return 0;
 }
 
-static int bcmgenet_mii_probe(struct net_device *dev)
+int bcmgenet_mii_probe(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct device_node *dn = priv->pdev->dev.of_node;
@@ -345,22 +346,6 @@ static int bcmgenet_mii_probe(struct net_device *dev)
        priv->old_pause = -1;
 
        if (dn) {
-               if (priv->phydev) {
-                       pr_info("PHY already attached\n");
-                       return 0;
-               }
-
-               /* In the case of a fixed PHY, the DT node associated
-                * to the PHY is the Ethernet MAC DT node.
-                */
-               if (!priv->phy_dn && of_phy_is_fixed_link(dn)) {
-                       ret = of_phy_register_fixed_link(dn);
-                       if (ret)
-                               return ret;
-
-                       priv->phy_dn = of_node_get(dn);
-               }
-
                phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
                                        phy_flags, priv->phy_interface);
                if (!phydev) {
@@ -386,7 +371,7 @@ static int bcmgenet_mii_probe(struct net_device *dev)
         * PHY speed which is needed for bcmgenet_mii_config() to configure
         * things appropriately.
         */
-       ret = bcmgenet_mii_config(dev, true);
+       ret = bcmgenet_mii_config(dev);
        if (ret) {
                phy_disconnect(priv->phydev);
                return ret;
@@ -397,14 +382,11 @@ static int bcmgenet_mii_probe(struct net_device *dev)
        /* The internal PHY has its link interrupts routed to the
         * Ethernet MAC ISRs
         */
-       if (phy_is_internal(priv->phydev))
+       if (priv->internal_phy)
                priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT;
        else
                priv->mii_bus->irq[phydev->addr] = PHY_POLL;
 
-       pr_info("attached PHY at address %d [%s]\n",
-               phydev->addr, phydev->drv->name);
-
        return 0;
 }
 
@@ -490,7 +472,10 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
 {
        struct device_node *dn = priv->pdev->dev.of_node;
        struct device *kdev = &priv->pdev->dev;
+       const char *phy_mode_str = NULL;
+       struct phy_device *phydev = NULL;
        char *compat;
+       int phy_mode;
        int ret;
 
        compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version);
@@ -513,17 +498,43 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
        /* Fetch the PHY phandle */
        priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0);
 
+       /* In the case of a fixed PHY, the DT node associated
+        * to the PHY is the Ethernet MAC DT node.
+        */
+       if (!priv->phy_dn && of_phy_is_fixed_link(dn)) {
+               ret = of_phy_register_fixed_link(dn);
+               if (ret)
+                       return ret;
+
+               priv->phy_dn = of_node_get(dn);
+       }
+
        /* Get the link mode */
-       priv->phy_interface = of_get_phy_mode(dn);
+       phy_mode = of_get_phy_mode(dn);
+       priv->phy_interface = phy_mode;
 
-       return 0;
-}
+       /* We need to specifically look up whether this PHY interface is internal
+        * or not *before* we even try to probe the PHY driver over MDIO as we
+        * may have shut down the internal PHY for power saving purposes.
+        */
+       if (phy_mode < 0) {
+               ret = of_property_read_string(dn, "phy-mode", &phy_mode_str);
+               if (ret < 0) {
+                       dev_err(kdev, "invalid PHY mode property\n");
+                       return ret;
+               }
 
-static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
-                                         struct fixed_phy_status *status)
-{
-       if (dev && dev->phydev && status)
-               status->link = dev->phydev->link;
+               priv->phy_interface = PHY_INTERFACE_MODE_NA;
+               if (!strcasecmp(phy_mode_str, "internal"))
+                       priv->internal_phy = true;
+       }
+
+       /* Make sure we initialize MoCA PHYs with a link down */
+       if (phy_mode == PHY_INTERFACE_MODE_MOCA) {
+               phydev = of_phy_find_device(dn);
+               if (phydev)
+                       phydev->link = 0;
+       }
 
        return 0;
 }
@@ -580,12 +591,9 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
                        return -ENODEV;
                }
 
-               if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) {
-                       ret = fixed_phy_set_link_update(
-                               phydev, bcmgenet_fixed_phy_link_update);
-                       if (!ret)
-                               phydev->link = 0;
-               }
+               /* Make sure we initialize MoCA PHYs with a link down */
+               phydev->link = 0;
+
        }
 
        priv->phydev = phydev;
@@ -614,10 +622,6 @@ int bcmgenet_mii_init(struct net_device *dev)
                return ret;
 
        ret = bcmgenet_mii_bus_init(priv);
-       if (ret)
-               goto out_free;
-
-       ret = bcmgenet_mii_probe(dev);
        if (ret)
                goto out;
 
@@ -626,7 +630,6 @@ int bcmgenet_mii_init(struct net_device *dev)
 out:
        of_node_put(priv->phy_dn);
        mdiobus_unregister(priv->mii_bus);
-out_free:
        kfree(priv->mii_bus->irq);
        mdiobus_free(priv->mii_bus);
        return ret;
index ac27e24264a5666e4e7418ee6fd252868b617b09..f557a2aaec231ff0669db71a1b6eee07cca7d90f 100644 (file)
@@ -1508,16 +1508,7 @@ static void sbmac_channel_start(struct sbmac_softc *s)
        __raw_writeq(reg, port);
        port = s->sbm_base + R_MAC_ETHERNET_ADDR;
 
-#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
-       /*
-        * Pass1 SOCs do not receive packets addressed to the
-        * destination address in the R_MAC_ETHERNET_ADDR register.
-        * Set the value to zero.
-        */
-       __raw_writeq(0, port);
-#else
        __raw_writeq(reg, port);
-#endif
 
        /*
         * Set the receive filter for no packets, and write values
index caeb39561567237261ac0d50befebad666cfbeb3..88c1e1a834f8c44491c76269be7c6aa805906ab2 100644 (file)
@@ -104,6 +104,57 @@ static void *macb_rx_buffer(struct macb *bp, unsigned int index)
        return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
 }
 
+/* I/O accessors */
+static u32 hw_readl_native(struct macb *bp, int offset)
+{
+       return __raw_readl(bp->regs + offset);
+}
+
+static void hw_writel_native(struct macb *bp, int offset, u32 value)
+{
+       __raw_writel(value, bp->regs + offset);
+}
+
+static u32 hw_readl(struct macb *bp, int offset)
+{
+       return readl_relaxed(bp->regs + offset);
+}
+
+static void hw_writel(struct macb *bp, int offset, u32 value)
+{
+       writel_relaxed(value, bp->regs + offset);
+}
+
+/*
+ * Find the CPU endianness by using the loopback bit of the NCR register.
+ * When the CPU is big-endian we need to program swapped mode for management
+ * descriptor access.
+ */
+static bool hw_is_native_io(void __iomem *addr)
+{
+       u32 value = MACB_BIT(LLB);
+
+       __raw_writel(value, addr + MACB_NCR);
+       value = __raw_readl(addr + MACB_NCR);
+
+       /* Write 0 back to disable everything */
+       __raw_writel(0, addr + MACB_NCR);
+
+       return value == MACB_BIT(LLB);
+}
+
+static bool hw_is_gem(void __iomem *addr, bool native_io)
+{
+       u32 id;
+
+       if (native_io)
+               id = __raw_readl(addr + MACB_MID);
+       else
+               id = readl_relaxed(addr + MACB_MID);
+
+       return MACB_BFEXT(IDNUM, id) >= 0x2;
+}
+
 static void macb_set_hwaddr(struct macb *bp)
 {
        u32 bottom;
@@ -160,7 +211,7 @@ static void macb_get_hwaddr(struct macb *bp)
                }
        }
 
-       netdev_info(bp->dev, "invalid hw address, using random\n");
+       dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
        eth_hw_addr_random(bp->dev);
 }
 
@@ -252,7 +303,6 @@ static void macb_handle_link_change(struct net_device *dev)
        struct macb *bp = netdev_priv(dev);
        struct phy_device *phydev = bp->phy_dev;
        unsigned long flags;
-
        int status_change = 0;
 
        spin_lock_irqsave(&bp->lock, flags);
@@ -449,14 +499,14 @@ err_out:
 
 static void macb_update_stats(struct macb *bp)
 {
-       u32 __iomem *reg = bp->regs + MACB_PFR;
        u32 *p = &bp->hw_stats.macb.rx_pause_frames;
        u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
+       int offset = MACB_PFR;
 
        WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
 
-       for(; p < end; p++, reg++)
-               *p += readl_relaxed(reg);
+       for(; p < end; p++, offset += 4)
+               *p += bp->macb_reg_readl(bp, offset);
 }
 
 static int macb_halt_tx(struct macb *bp)
@@ -1107,12 +1157,6 @@ static void macb_poll_controller(struct net_device *dev)
 }
 #endif
 
-static inline unsigned int macb_count_tx_descriptors(struct macb *bp,
-                                                    unsigned int len)
-{
-       return (len + bp->max_tx_length - 1) / bp->max_tx_length;
-}
-
 static unsigned int macb_tx_map(struct macb *bp,
                                struct macb_queue *queue,
                                struct sk_buff *skb)
@@ -1263,11 +1307,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
         * socket buffer: skb fragments of jumbo frames may need to be
         * split into many buffer descriptors.
         */
-       count = macb_count_tx_descriptors(bp, skb_headlen(skb));
+       count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
        nr_frags = skb_shinfo(skb)->nr_frags;
        for (f = 0; f < nr_frags; f++) {
                frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
-               count += macb_count_tx_descriptors(bp, frag_size);
+               count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
        }
 
        spin_lock_irqsave(&bp->lock, flags);
@@ -1603,7 +1647,6 @@ static u32 macb_dbw(struct macb *bp)
 static void macb_configure_dma(struct macb *bp)
 {
        u32 dmacfg;
-       u32 tmp, ncr;
 
        if (macb_is_gem(bp)) {
                dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
@@ -1613,22 +1656,11 @@ static void macb_configure_dma(struct macb *bp)
                dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
                dmacfg &= ~GEM_BIT(ENDIA_PKT);
 
-               /* Find the CPU endianness by using the loopback bit of net_ctrl
-                * register. save it first. When the CPU is in big endian we
-                * need to program swaped mode for management descriptor access.
-                */
-               ncr = macb_readl(bp, NCR);
-               __raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR);
-               tmp =  __raw_readl(bp->regs + MACB_NCR);
-
-               if (tmp == MACB_BIT(LLB))
+               if (bp->native_io)
                        dmacfg &= ~GEM_BIT(ENDIA_DESC);
                else
                        dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
 
-               /* Restore net_ctrl */
-               macb_writel(bp, NCR, ncr);
-
                if (bp->dev->features & NETIF_F_HW_CSUM)
                        dmacfg |= GEM_BIT(TXCOEN);
                else
@@ -1897,19 +1929,19 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu)
 
 static void gem_update_stats(struct macb *bp)
 {
-       int i;
+       unsigned int i;
        u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
 
        for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
                u32 offset = gem_statistics[i].offset;
-               u64 val = readl_relaxed(bp->regs + offset);
+               u64 val = bp->macb_reg_readl(bp, offset);
 
                bp->ethtool_stats[i] += val;
                *p += val;
 
                if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
                        /* Add GEM_OCTTXH, GEM_OCTRXH */
-                       val = readl_relaxed(bp->regs + offset + 4);
+                       val = bp->macb_reg_readl(bp, offset + 4);
                        bp->ethtool_stats[i] += ((u64)val) << 32;
                        *(++p) += val;
                }
@@ -1976,7 +2008,7 @@ static int gem_get_sset_count(struct net_device *dev, int sset)
 
 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
 {
-       int i;
+       unsigned int i;
 
        switch (sset) {
        case ETH_SS_STATS:
@@ -2190,7 +2222,7 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co
        if (dt_conf)
                bp->caps = dt_conf->caps;
 
-       if (macb_is_gem_hw(bp->regs)) {
+       if (hw_is_gem(bp->regs, bp->native_io)) {
                bp->caps |= MACB_CAPS_MACB_IS_GEM;
 
                dcfg = gem_readl(bp, DCFG1);
@@ -2201,10 +2233,11 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co
                        bp->caps |= MACB_CAPS_FIFO_MODE;
        }
 
-       netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps);
+       dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
 }
 
 static void macb_probe_queues(void __iomem *mem,
+                             bool native_io,
                              unsigned int *queue_mask,
                              unsigned int *num_queues)
 {
@@ -2219,7 +2252,7 @@ static void macb_probe_queues(void __iomem *mem,
         * we are early in the probe process and don't have the
         * MACB_CAPS_MACB_IS_GEM flag positioned
         */
-       if (!macb_is_gem_hw(mem))
+       if (!hw_is_gem(mem, native_io))
                return;
 
        /* bit 0 is never set but queue 0 always exists */
@@ -2741,8 +2774,7 @@ static const struct macb_config emac_config = {
 
 
 static const struct macb_config zynqmp_config = {
-       .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
-               MACB_CAPS_JUMBO,
+       .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
        .dma_burst_length = 16,
        .clk_init = macb_clk_init,
        .init = macb_init,
@@ -2750,8 +2782,7 @@ static const struct macb_config zynqmp_config = {
 };
 
 static const struct macb_config zynq_config = {
-       .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
-               MACB_CAPS_NO_GIGABIT_HALF,
+       .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
        .dma_burst_length = 16,
        .clk_init = macb_clk_init,
        .init = macb_init,
@@ -2786,6 +2817,7 @@ static int macb_probe(struct platform_device *pdev)
        struct clk *pclk, *hclk, *tx_clk;
        unsigned int queue_mask, num_queues;
        struct macb_platform_data *pdata;
+       bool native_io;
        struct phy_device *phydev;
        struct net_device *dev;
        struct resource *regs;
@@ -2794,6 +2826,11 @@ static int macb_probe(struct platform_device *pdev)
        struct macb *bp;
        int err;
 
+       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       mem = devm_ioremap_resource(&pdev->dev, regs);
+       if (IS_ERR(mem))
+               return PTR_ERR(mem);
+
        if (np) {
                const struct of_device_id *match;
 
@@ -2809,14 +2846,9 @@ static int macb_probe(struct platform_device *pdev)
        if (err)
                return err;
 
-       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       mem = devm_ioremap_resource(&pdev->dev, regs);
-       if (IS_ERR(mem)) {
-               err = PTR_ERR(mem);
-               goto err_disable_clocks;
-       }
+       native_io = hw_is_native_io(mem);
 
-       macb_probe_queues(mem, &queue_mask, &num_queues);
+       macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
        dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
        if (!dev) {
                err = -ENOMEM;
@@ -2831,6 +2863,14 @@ static int macb_probe(struct platform_device *pdev)
        bp->pdev = pdev;
        bp->dev = dev;
        bp->regs = mem;
+       bp->native_io = native_io;
+       if (native_io) {
+               bp->macb_reg_readl = hw_readl_native;
+               bp->macb_reg_writel = hw_writel_native;
+       } else {
+               bp->macb_reg_readl = hw_readl;
+               bp->macb_reg_writel = hw_writel;
+       }
        bp->num_queues = num_queues;
        bp->queue_mask = queue_mask;
        if (macb_config)
@@ -2838,9 +2878,8 @@ static int macb_probe(struct platform_device *pdev)
        bp->pclk = pclk;
        bp->hclk = hclk;
        bp->tx_clk = tx_clk;
-       if (macb_config->jumbo_max_len) {
+       if (macb_config)
                bp->jumbo_max_len = macb_config->jumbo_max_len;
-       }
 
        spin_lock_init(&bp->lock);
 
index d74655993d4bf19cec68ab227263f5f069467e4c..6e1faea00ca829f5c9df34b45518d7e1a881c5c3 100644 (file)
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE       0x20000000
 #define MACB_CAPS_SG_DISABLED                  0x40000000
 #define MACB_CAPS_MACB_IS_GEM                  0x80000000
-#define MACB_CAPS_JUMBO                                0x00000008
+#define MACB_CAPS_JUMBO                                0x00000010
 
 /* Bit manipulation macros */
 #define MACB_BIT(name)                                 \
         | GEM_BF(name, value))
 
 /* Register access macros */
-#define macb_readl(port,reg)                           \
-       readl_relaxed((port)->regs + MACB_##reg)
-#define macb_writel(port,reg,value)                    \
-       writel_relaxed((value), (port)->regs + MACB_##reg)
-#define gem_readl(port, reg)                           \
-       readl_relaxed((port)->regs + GEM_##reg)
-#define gem_writel(port, reg, value)                   \
-       writel_relaxed((value), (port)->regs + GEM_##reg)
-#define queue_readl(queue, reg)                                \
-       readl_relaxed((queue)->bp->regs + (queue)->reg)
-#define queue_writel(queue, reg, value)                        \
-       writel_relaxed((value), (queue)->bp->regs + (queue)->reg)
+#define macb_readl(port, reg)          (port)->macb_reg_readl((port), MACB_##reg)
+#define macb_writel(port, reg, value)  (port)->macb_reg_writel((port), MACB_##reg, (value))
+#define gem_readl(port, reg)           (port)->macb_reg_readl((port), GEM_##reg)
+#define gem_writel(port, reg, value)   (port)->macb_reg_writel((port), GEM_##reg, (value))
+#define queue_readl(queue, reg)                (queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg)
+#define queue_writel(queue, reg, value)        (queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value))
 
 /* Conditional GEM/MACB macros.  These perform the operation to the correct
  * register dependent on whether the device is a GEM or a MACB.  For registers
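
With the macros rewritten, every register access funnels through the accessor
pair installed at probe time, so one binary serves both byte orders. A minimal
sketch, assuming a probed struct macb (example_read_ncr is illustrative):

    /* macb_readl(bp, NCR) expands to (bp)->macb_reg_readl(bp, MACB_NCR)
     * and dispatches to either hw_readl_native() or hw_readl().
     */
    static u32 example_read_ncr(struct macb *bp)
    {
            return macb_readl(bp, NCR);
    }
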
@@ -785,6 +779,11 @@ struct macb_queue {
 
 struct macb {
        void __iomem            *regs;
+       bool                    native_io;
+
+       /* hardware IO accessors */
+       u32     (*macb_reg_readl)(struct macb *bp, int offset);
+       void    (*macb_reg_writel)(struct macb *bp, int offset, u32 value);
 
        unsigned int            rx_tail;
        unsigned int            rx_prepared_head;
@@ -817,9 +816,9 @@ struct macb {
 
        struct mii_bus          *mii_bus;
        struct phy_device       *phy_dev;
-       unsigned int            link;
-       unsigned int            speed;
-       unsigned int            duplex;
+       int                     link;
+       int                     speed;
+       int                     duplex;
 
        u32                     caps;
        unsigned int            dma_burst_length;
@@ -843,9 +842,4 @@ static inline bool macb_is_gem(struct macb *bp)
        return !!(bp->caps & MACB_CAPS_MACB_IS_GEM);
 }
 
-static inline bool macb_is_gem_hw(void __iomem *addr)
-{
-       return !!(MACB_BFEXT(IDNUM, readl_relaxed(addr + MACB_MID)) >= 0x2);
-}
-
 #endif /* _MACB_H */
index c4d6bbe9458dbfe9c726fdff1e9b6c4c64b8a33f..3584420878782aa72800072e28997d40280dccec 100644 (file)
@@ -37,6 +37,8 @@ config        THUNDER_NIC_BGX
        tristate "Thunder MAC interface driver (BGX)"
        depends on 64BIT
        default ARCH_THUNDER
+       select PHYLIB
+       select MDIO_OCTEON
        ---help---
          This driver supports programming and controlling of MAC
          interface from NIC physical function driver.
index dda8a02b7322d63197d583ab1c107226ea913051..8aee250904ec83de7b230bf186c3835e0957f46a 100644 (file)
  */
 #define NICPF_CLK_PER_INT_TICK         2
 
+/* Time to wait before we decide that a SQ is stuck.
+ *
+ * Since both packet Rx and Tx notifications are signalled on the same CQ,
+ * freeing transmitted skbs is delayed when packets arrive at a very high
+ * rate (e.g. L2 forwarding), and the watchdog would kick in and reset the
+ * interface. Hence keep this value high.
+ */
+#define        NICVF_TX_TIMEOUT                (50 * HZ)
+
 struct nicvf_cq_poll {
        u8      cq_idx;         /* Completion queue index */
        struct  napi_struct napi;
@@ -216,8 +225,9 @@ struct nicvf_drv_stats {
        /* Tx */
        u64 tx_frames_ok;
        u64 tx_drops;
-       u64 tx_busy;
        u64 tx_tso;
+       u64 txq_stop;
+       u64 txq_wake;
 };
 
 struct nicvf {
index 16bd2d772db9f81da0969f7594c4aa3b1761c30f..a4228e66456707d3da44e0cfce653b318b180444 100644 (file)
@@ -66,9 +66,10 @@ static const struct nicvf_stat nicvf_drv_stats[] = {
        NICVF_DRV_STAT(rx_frames_jumbo),
        NICVF_DRV_STAT(rx_drops),
        NICVF_DRV_STAT(tx_frames_ok),
-       NICVF_DRV_STAT(tx_busy),
        NICVF_DRV_STAT(tx_tso),
        NICVF_DRV_STAT(tx_drops),
+       NICVF_DRV_STAT(txq_stop),
+       NICVF_DRV_STAT(txq_wake),
 };
 
 static const struct nicvf_stat nicvf_queue_stats[] = {
@@ -126,6 +127,7 @@ static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
 
 static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
 {
+       struct nicvf *nic = netdev_priv(netdev);
        int stats, qidx;
 
        if (sset != ETH_SS_STATS)
@@ -141,7 +143,7 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
                data += ETH_GSTRING_LEN;
        }
 
-       for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) {
+       for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
                for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
                        sprintf(data, "rxq%d: %s", qidx,
                                nicvf_queue_stats[stats].name);
@@ -149,7 +151,7 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
                }
        }
 
-       for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
+       for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
                for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
                        sprintf(data, "txq%d: %s", qidx,
                                nicvf_queue_stats[stats].name);
@@ -170,12 +172,14 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
 
 static int nicvf_get_sset_count(struct net_device *netdev, int sset)
 {
+       struct nicvf *nic = netdev_priv(netdev);
+
        if (sset != ETH_SS_STATS)
                return -EINVAL;
 
        return nicvf_n_hw_stats + nicvf_n_drv_stats +
                (nicvf_n_queue_stats *
-                (MAX_RCV_QUEUES_PER_QS + MAX_SND_QUEUES_PER_QS)) +
+                (nic->qs->rq_cnt + nic->qs->sq_cnt)) +
                BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
 }
 
@@ -197,13 +201,13 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
                *(data++) = ((u64 *)&nic->drv_stats)
                                [nicvf_drv_stats[stat].index];
 
-       for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) {
+       for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
                for (stat = 0; stat < nicvf_n_queue_stats; stat++)
                        *(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
                                        [nicvf_queue_stats[stat].index];
        }
 
-       for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
+       for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
                for (stat = 0; stat < nicvf_n_queue_stats; stat++)
                        *(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
                                        [nicvf_queue_stats[stat].index];
@@ -543,6 +547,7 @@ static int nicvf_set_channels(struct net_device *dev,
 {
        struct nicvf *nic = netdev_priv(dev);
        int err = 0;
+       bool if_up = netif_running(dev);
 
        if (!channel->rx_count || !channel->tx_count)
                return -EINVAL;
@@ -551,6 +556,9 @@ static int nicvf_set_channels(struct net_device *dev,
        if (channel->tx_count > MAX_SND_QUEUES_PER_QS)
                return -EINVAL;
 
+       if (if_up)
+               nicvf_stop(dev);
+
        nic->qs->rq_cnt = channel->rx_count;
        nic->qs->sq_cnt = channel->tx_count;
        nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
@@ -559,11 +567,9 @@ static int nicvf_set_channels(struct net_device *dev,
        if (err)
                return err;
 
-       if (!netif_running(dev))
-               return err;
+       if (if_up)
+               nicvf_open(dev);
 
-       nicvf_stop(dev);
-       nicvf_open(dev);
        netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
                    nic->qs->sq_cnt, nic->qs->rq_cnt);
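The reworked flow above is the usual ethtool pattern for queue-count changes: record whether the interface was running, quiesce it, reconfigure, then restart only if it had been up, so a downed interface is never started as a side effect. Condensed to a sketch, with the driver-specific middle elided:

    bool if_up = netif_running(dev);

    if (if_up)
            nicvf_stop(dev);        /* quiesce before resizing */

    /* update rq_cnt / sq_cnt / cq_cnt and renegotiate with the PF;
     * on error, return without restarting a half-configured device
     */

    if (if_up)
            nicvf_open(dev);        /* restart only if it was up */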
 
index 8b119a035b7e2f547a9259ce35eb390ce09c4b90..3b90afb8c293254e999f76f3ebbfbd0108dd7022 100644 (file)
@@ -234,7 +234,7 @@ static void  nicvf_handle_mbx_intr(struct nicvf *nic)
                                    nic->duplex == DUPLEX_FULL ?
                                "Full duplex" : "Half duplex");
                        netif_carrier_on(nic->netdev);
-                       netif_tx_wake_all_queues(nic->netdev);
+                       netif_tx_start_all_queues(nic->netdev);
                } else {
                        netdev_info(nic->netdev, "%s: Link is Down\n",
                                    nic->netdev->name);
@@ -425,6 +425,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
        if (skb) {
                prefetch(skb);
                dev_consume_skb_any(skb);
+               sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
        }
 }
 
@@ -476,12 +477,13 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
                                 struct napi_struct *napi, int budget)
 {
-       int processed_cqe, work_done = 0;
+       int processed_cqe, work_done = 0, tx_done = 0;
        int cqe_count, cqe_head;
        struct nicvf *nic = netdev_priv(netdev);
        struct queue_set *qs = nic->qs;
        struct cmp_queue *cq = &qs->cq[cq_idx];
        struct cqe_rx_t *cq_desc;
+       struct netdev_queue *txq;
 
        spin_lock_bh(&cq->lock);
 loop:
@@ -496,8 +498,8 @@ loop:
        cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
        cqe_head &= 0xFFFF;
 
-       netdev_dbg(nic->netdev, "%s cqe_count %d cqe_head %d\n",
-                  __func__, cqe_count, cqe_head);
+       netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
+                  __func__, cq_idx, cqe_count, cqe_head);
        while (processed_cqe < cqe_count) {
                /* Get the CQ descriptor */
                cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
@@ -511,8 +513,8 @@ loop:
                        break;
                }
 
-               netdev_dbg(nic->netdev, "cq_desc->cqe_type %d\n",
-                          cq_desc->cqe_type);
+               netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
+                          cq_idx, cq_desc->cqe_type);
                switch (cq_desc->cqe_type) {
                case CQE_TYPE_RX:
                        nicvf_rcv_pkt_handler(netdev, napi, cq,
@@ -522,6 +524,7 @@ loop:
                case CQE_TYPE_SEND:
                        nicvf_snd_pkt_handler(netdev, cq,
                                              (void *)cq_desc, CQE_TYPE_SEND);
+                       tx_done++;
                break;
                case CQE_TYPE_INVALID:
                case CQE_TYPE_RX_SPLIT:
@@ -532,8 +535,9 @@ loop:
                }
                processed_cqe++;
        }
-       netdev_dbg(nic->netdev, "%s processed_cqe %d work_done %d budget %d\n",
-                  __func__, processed_cqe, work_done, budget);
+       netdev_dbg(nic->netdev,
+                  "%s CQ%d processed_cqe %d work_done %d budget %d\n",
+                  __func__, cq_idx, processed_cqe, work_done, budget);
 
        /* Ring doorbell to inform H/W to reuse processed CQEs */
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
@@ -543,6 +547,19 @@ loop:
                goto loop;
 
 done:
+       /* Wake up the TXQ if it was stopped earlier because the SQ was full */
+       if (tx_done) {
+               txq = netdev_get_tx_queue(netdev, cq_idx);
+               if (netif_tx_queue_stopped(txq)) {
+                       netif_tx_start_queue(txq);
+                       nic->drv_stats.txq_wake++;
+                       if (netif_msg_tx_err(nic))
+                               netdev_warn(netdev,
+                                           "%s: Transmit queue wakeup SQ%d\n",
+                                           netdev->name, cq_idx);
+               }
+       }
+
        spin_unlock_bh(&cq->lock);
        return work_done;
 }
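Counting CQE_TYPE_SEND completions and waking the queue only when tx_done is non-zero ties every wakeup to at least one reclaimed SQ slot; the unconditional wakeup formerly done in nicvf_poll() (removed in the next hunk) could wake a queue even when nothing had completed. The pairing, reduced to its essentials as a sketch:

    /* xmit path: stop when the SQ cannot accept the skb */
    if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
            netif_tx_stop_queue(txq);
            nic->drv_stats.txq_stop++;
    }

    /* completion path: wake only after reaping send completions */
    if (tx_done && netif_tx_queue_stopped(txq)) {
            netif_tx_start_queue(txq);
            nic->drv_stats.txq_wake++;
    }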
@@ -554,15 +571,10 @@ static int nicvf_poll(struct napi_struct *napi, int budget)
        struct net_device *netdev = napi->dev;
        struct nicvf *nic = netdev_priv(netdev);
        struct nicvf_cq_poll *cq;
-       struct netdev_queue *txq;
 
        cq = container_of(napi, struct nicvf_cq_poll, napi);
        work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);
 
-       txq = netdev_get_tx_queue(netdev, cq->cq_idx);
-       if (netif_tx_queue_stopped(txq))
-               netif_tx_wake_queue(txq);
-
        if (work_done < budget) {
                /* Slow packet rate, exit polling */
                napi_complete(napi);
@@ -833,9 +845,9 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
                return NETDEV_TX_OK;
        }
 
-       if (!nicvf_sq_append_skb(nic, skb) && !netif_tx_queue_stopped(txq)) {
+       if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
                netif_tx_stop_queue(txq);
-               nic->drv_stats.tx_busy++;
+               nic->drv_stats.txq_stop++;
                if (netif_msg_tx_err(nic))
                        netdev_warn(netdev,
                                    "%s: Transmit ring full, stopping SQ%d\n",
@@ -859,7 +871,6 @@ int nicvf_stop(struct net_device *netdev)
        nicvf_send_msg_to_pf(nic, &mbx);
 
        netif_carrier_off(netdev);
-       netif_tx_disable(netdev);
 
        /* Disable RBDR & QS error interrupts */
        for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
@@ -894,6 +905,8 @@ int nicvf_stop(struct net_device *netdev)
                kfree(cq_poll);
        }
 
+       netif_tx_disable(netdev);
+
        /* Free resources */
        nicvf_config_data_transfer(nic, false);
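Moving netif_tx_disable() from right after netif_carrier_off() to after the NAPI teardown closes a race: as long as any CQ handler can still run it may legitimately wake a Tx queue, so the queues are only disabled for good once every NAPI instance has been disabled and deleted. The resulting teardown order, as a sketch:

    /* nicvf_stop() ordering after the change:
     * 1. notify the PF and drop the carrier
     * 2. disable interrupts, then disable and delete all NAPI instances
     *    (no CQ handler can wake a Tx queue from here on)
     * 3. netif_tx_disable(): queues now stay stopped
     * 4. free queue resources
     */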
 
@@ -988,6 +1001,9 @@ int nicvf_open(struct net_device *netdev)
        for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
                nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
 
+       nic->drv_stats.txq_stop = 0;
+       nic->drv_stats.txq_wake = 0;
+
        netif_carrier_on(netdev);
        netif_tx_start_all_queues(netdev);
 
@@ -1278,6 +1294,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        netdev->hw_features = netdev->features;
 
        netdev->netdev_ops = &nicvf_netdev_ops;
+       netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
 
        INIT_WORK(&nic->reset_task, nicvf_reset_task);
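Setting watchdog_timeo arms the networking core's Tx watchdog: if a stopped Tx queue makes no progress for NICVF_TX_TIMEOUT jiffies, the driver's ndo_tx_timeout hook is invoked. A hedged sketch of that wiring, reusing the reset_task initialized above; the handler shown is illustrative, not copied from this patch:

    static void nicvf_tx_timeout(struct net_device *netdev)
    {
            struct nicvf *nic = netdev_priv(netdev);

            netdev_warn(netdev, "Transmit timed out, resetting\n");
            schedule_work(&nic->reset_task);
    }

    static const struct net_device_ops nicvf_netdev_ops = {
            /* ... .ndo_open, .ndo_stop, .ndo_start_xmit ... */
            .ndo_tx_timeout = nicvf_tx_timeout,
    };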
 
@@ -1318,11 +1335,17 @@ static void nicvf_remove(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
 
+static void nicvf_shutdown(struct pci_dev *pdev)
+{
+       nicvf_remove(pdev);
+}
+
 static struct pci_driver nicvf_driver = {
        .name = DRV_NAME,
        .id_table = nicvf_id_table,
        .probe = nicvf_probe,
        .remove = nicvf_remove,
+       .shutdown = nicvf_shutdown,
 };
 
 static int __init nicvf_init_module(void)
index d69d228d11a013b4cb709041e49472a9b3b34040..ca4240aa6d1567c044442c239c16f330bdd4f0f4 100644 (file)
@@ -103,9 +103,11 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
 
        /* Allocate a new page */
        if (!nic->rb_page) {
-               nic->rb_page = alloc_pages(gfp | __GFP_COMP, order);
+               nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
+                                          order);
                if (!nic->rb_page) {
-                       netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n");
+                       netdev_err(nic->netdev,
+                                  "Failed to allocate new rcv buffer\n");
                        return -ENOMEM;
                }
                nic->rb_page_offset = 0;
@@ -382,7 +384,8 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
                return;
 
        if (sq->tso_hdrs)
-               dma_free_coherent(&nic->pdev->dev, sq->dmem.q_len,
+               dma_free_coherent(&nic->pdev->dev,
+                                 sq->dmem.q_len * TSO_HEADER_SIZE,
                                  sq->tso_hdrs, sq->tso_hdrs_phys);
 
        kfree(sq->skbuff);
@@ -863,10 +866,11 @@ void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
                        continue;
                }
                skb = (struct sk_buff *)sq->skbuff[sq->head];
+               if (skb)
+                       dev_kfree_skb_any(skb);
                atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
                atomic64_add(hdr->tot_len,
                             (atomic64_t *)&netdev->stats.tx_bytes);
-               dev_kfree_skb_any(skb);
                nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
        }
 }
@@ -992,7 +996,7 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
 
        memset(gather, 0, SND_QUEUE_DESC_SIZE);
        gather->subdesc_type = SQ_DESC_TYPE_GATHER;
-       gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB;
+       gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
        gather->size = size;
        gather->addr = data;
 }
@@ -1048,7 +1052,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
                }
                nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
                                         seg_subdescs - 1, skb, seg_len);
-               sq->skbuff[hdr_qentry] = 0;
+               sq->skbuff[hdr_qentry] = (u64)NULL;
                qentry = nicvf_get_nxt_sqentry(sq, qentry);
 
                desc_cnt += seg_subdescs;
@@ -1062,6 +1066,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
        /* Inform HW to xmit all TSO segments */
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
                              skb_get_queue_mapping(skb), desc_cnt);
+       nic->drv_stats.tx_tso++;
        return 1;
 }
 
index 8341bdf755d1dfa8a4d288c43d7037fccf5bb6ea..f0937b7bfe9f476d914f9eb28ad040125b9750d9 100644 (file)
@@ -62,7 +62,7 @@
 #define SND_QUEUE_CNT          8
 #define CMP_QUEUE_CNT          8 /* Max of RCV and SND qcount */
 
-#define SND_QSIZE              SND_QUEUE_SIZE4
+#define SND_QSIZE              SND_QUEUE_SIZE2
 #define SND_QUEUE_LEN          (1ULL << (SND_QSIZE + 10))
 #define MAX_SND_QUEUE_LEN      (1ULL << (SND_QUEUE_SIZE6 + 10))
 #define SND_QUEUE_THRESH       2ULL
 /* Since timestamp not enabled, otherwise 2 */
 #define MAX_CQE_PER_PKT_XMIT           1
 
-#define CMP_QSIZE              CMP_QUEUE_SIZE4
+/* Keep the CQ and SQ sizes the same; if timestamping
+ * is enabled this equation will change.
+ */
+#define CMP_QSIZE              CMP_QUEUE_SIZE2
 #define CMP_QUEUE_LEN          (1ULL << (CMP_QSIZE + 10))
 #define CMP_QUEUE_CQE_THRESH   0
 #define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */
 
 #define MAX_CQES_FOR_TX                ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
                                 MAX_CQE_PER_PKT_XMIT)
-#define RQ_CQ_DROP             ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256)
+/* Calculate the number of CQEs to reserve for all SQEs.
+ * It is expressed in 1/256th units of the CQ size;
+ * '+ 1' accounts for pipelining.
+ */
+#define RQ_CQ_DROP             ((256 / (CMP_QUEUE_LEN / \
+                                (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
 
 /* Descriptor size in bytes */
 #define SND_QUEUE_DESC_SIZE    16
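For a concrete feel of the new RQ_CQ_DROP expression, here is the arithmetic worked through under assumed (not patch-supplied) values, SND_QUEUE_SIZE2 == CMP_QUEUE_SIZE2 == 2 and MIN_SQ_DESC_PER_PKT_XMIT == 2:

    /* Worked example with assumed values:
     *   SND_QUEUE_LEN   = 1 << (2 + 10)                  = 4096
     *   CMP_QUEUE_LEN   = 1 << (2 + 10)                  = 4096
     *   MAX_CQES_FOR_TX = (4096 / 2) * 1                 = 2048
     *   RQ_CQ_DROP      = 256 / (4096 / (4096 - 2048)) + 1
     *                   = 256 / 2 + 1                    = 129
     *
     * With CQ and SQ the same size, roughly half the CQ (in 1/256th
     * units, plus one for pipelining) stays reserved for send
     * completions before Rx drops begin.
     */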
index 633ec05dfe0578b71825bf409df24371f58a776c..b961a89dc6264555553ee94484478b5dca9822dc 100644 (file)
@@ -673,7 +673,10 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
        bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
        bgx_flush_dmac_addrs(bgx, lmacid);
 
-       if (lmac->phydev)
+       if ((bgx->lmac_type != BGX_MODE_XFI) &&
+           (bgx->lmac_type != BGX_MODE_XLAUI) &&
+           (bgx->lmac_type != BGX_MODE_40G_KR) &&
+           (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
                phy_disconnect(lmac->phydev);
 
        lmac->phydev = NULL;
index 629f75d703535d3ef5d8319a9ed2f3ec90a51ba9..58de4443eac0318234fd6b6ef46384243f14d7da 100644 (file)
@@ -767,6 +767,7 @@ struct adapter {
        bool tid_release_task_busy;
 
        struct dentry *debugfs_root;
+       u32 use_bd;     /* Use SGE Back Door intfc for reading SGE Contexts */
 
        spinlock_t stats_lock;
        spinlock_t win0_lock ____cacheline_aligned_in_smp;
index 6074680bc9858308fa68887140856c8bb05b4d6a..052c660aca80a8dd06593112c9961a6bdda07309 100644 (file)
@@ -31,6 +31,15 @@ static const char * const dcb_ver_array[] = {
        "Auto Negotiated"
 };
 
+static inline bool cxgb4_dcb_state_synced(enum cxgb4_dcb_state state)
+{
+       return state == CXGB4_DCB_STATE_FW_ALLSYNCED ||
+              state == CXGB4_DCB_STATE_HOST;
+}
+
 /* Initialize a port's Data Center Bridging state.  Typically used after a
  * Link Down event.
  */
@@ -603,7 +612,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg)
        struct port_info *pi = netdev2pinfo(dev);
        struct port_dcb_info *dcb = &pi->dcb;
 
-       if (dcb->state != CXGB4_DCB_STATE_FW_ALLSYNCED ||
+       if (!cxgb4_dcb_state_synced(dcb->state) ||
            priority >= CXGB4_MAX_PRIORITY)
                *pfccfg = 0;
        else
@@ -620,7 +629,7 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
        struct adapter *adap = pi->adapter;
        int err;
 
-       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED ||
+       if (!cxgb4_dcb_state_synced(pi->dcb.state) ||
            priority >= CXGB4_MAX_PRIORITY)
                return;
 
@@ -732,7 +741,7 @@ static u8 cxgb4_getpfcstate(struct net_device *dev)
 {
        struct port_info *pi = netdev2pinfo(dev);
 
-       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+       if (!cxgb4_dcb_state_synced(pi->dcb.state))
                return false;
 
        return pi->dcb.pfcen != 0;
@@ -756,7 +765,7 @@ static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id,
        struct adapter *adap = pi->adapter;
        int i;
 
-       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+       if (!cxgb4_dcb_state_synced(pi->dcb.state))
                return 0;
 
        for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
@@ -794,7 +803,9 @@ static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id,
  */
 static int cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id)
 {
-       return __cxgb4_getapp(dev, app_idtype, app_id, 0);
+       /* Convert app_idtype to firmware format before querying */
+       return __cxgb4_getapp(dev, app_idtype == DCB_APP_IDTYPE_ETHTYPE ?
+                             app_idtype : 3, app_id, 0);
 }
 
 /* Write a new Application User Priority Map for the specified Application ID
@@ -808,7 +819,7 @@ static int __cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id,
        int i, err;
 
 
-       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+       if (!cxgb4_dcb_state_synced(pi->dcb.state))
                return -EINVAL;
 
        /* DCB info gets thrown away on link up */
@@ -896,10 +907,11 @@ cxgb4_ieee_negotiation_complete(struct net_device *dev,
        struct port_info *pi = netdev2pinfo(dev);
        struct port_dcb_info *dcb = &pi->dcb;
 
-       if (dcb_subtype && !(dcb->msgs & dcb_subtype))
-               return 0;
+       if (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED)
+               if (dcb_subtype && !(dcb->msgs & dcb_subtype))
+                       return 0;
 
-       return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED &&
+       return (cxgb4_dcb_state_synced(dcb->state) &&
                (dcb->supported & DCB_CAP_DCBX_VER_IEEE));
 }
 
@@ -1057,7 +1069,7 @@ static u8 cxgb4_setdcbx(struct net_device *dev, u8 dcb_request)
 
        /* Can't enable DCB if we haven't successfully negotiated it.
         */
-       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+       if (!cxgb4_dcb_state_synced(pi->dcb.state))
                return 1;
 
        /* There's currently no mechanism to allow for the firmware DCBX
@@ -1080,7 +1092,7 @@ static int cxgb4_getpeer_app(struct net_device *dev,
        struct adapter *adap = pi->adapter;
        int i, err = 0;
 
-       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+       if (!cxgb4_dcb_state_synced(pi->dcb.state))
                return 1;
 
        info->willing = 0;
@@ -1114,7 +1126,7 @@ static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table)
        struct adapter *adap = pi->adapter;
        int i, err = 0;
 
-       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+       if (!cxgb4_dcb_state_synced(pi->dcb.state))
                return 1;
 
        for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
@@ -1133,7 +1145,7 @@ static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table)
                if (!pcmd.u.dcb.app_priority.protocolid)
                        break;
 
-               table[i].selector = pcmd.u.dcb.app_priority.sel_field;
+               table[i].selector = (pcmd.u.dcb.app_priority.sel_field + 1);
                table[i].protocol =
                        be16_to_cpu(pcmd.u.dcb.app_priority.protocolid);
                table[i].priority =
@@ -1181,6 +1193,8 @@ static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg)
        for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
                pg->pg_bw[i] = pcmd.u.dcb.pgrate.pgrate[i];
 
+       pg->tcs_supported = pcmd.u.dcb.pgrate.num_tcs_supported;
+
        return 0;
 }
 
@@ -1198,6 +1212,8 @@ static int cxgb4_cee_peer_getpfc(struct net_device *dev, struct cee_pfc *pfc)
         */
        pfc->pfc_en = bitswap_1(pi->dcb.pfcen);
 
+       pfc->tcs_supported = pi->dcb.pfc_num_tcs_supported;
+
        return 0;
 }
 
index a11485fbb33f2b7bcd6c973324ea41601dbaf575..b6577349cf4e3beeec71ffa6440b0911b5ee0aa3 100644 (file)
@@ -151,6 +151,45 @@ static int cim_la_show_3in1(struct seq_file *seq, void *v, int idx)
        return 0;
 }
 
+static int cim_la_show_t6(struct seq_file *seq, void *v, int idx)
+{
+       if (v == SEQ_START_TOKEN) {
+               seq_puts(seq, "Status   Inst    Data      PC     LS0Stat  "
+                        "LS0Addr  LS0Data  LS1Stat  LS1Addr  LS1Data\n");
+       } else {
+               const u32 *p = v;
+
+               seq_printf(seq, "  %02x   %04x%04x %04x%04x %04x%04x %08x %08x %08x %08x %08x %08x\n",
+                          (p[9] >> 16) & 0xff,       /* Status */
+                          p[9] & 0xffff, p[8] >> 16, /* Inst */
+                          p[8] & 0xffff, p[7] >> 16, /* Data */
+                          p[7] & 0xffff, p[6] >> 16, /* PC */
+                          p[2], p[1], p[0],      /* LS0 Stat, Addr and Data */
+                          p[5], p[4], p[3]);     /* LS1 Stat, Addr and Data */
+       }
+       return 0;
+}
+
+static int cim_la_show_pc_t6(struct seq_file *seq, void *v, int idx)
+{
+       if (v == SEQ_START_TOKEN) {
+               seq_puts(seq, "Status   Inst    Data      PC\n");
+       } else {
+               const u32 *p = v;
+
+               seq_printf(seq, "  %02x   %08x %08x %08x\n",
+                          p[3] & 0xff, p[2], p[1], p[0]);
+               seq_printf(seq, "  %02x   %02x%06x %02x%06x %02x%06x\n",
+                          (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
+                          p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
+               seq_printf(seq, "  %02x   %04x%04x %04x%04x %04x%04x\n",
+                          (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
+                          p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
+                          p[6] >> 16);
+       }
+       return 0;
+}
+
 static int cim_la_open(struct inode *inode, struct file *file)
 {
        int ret;
@@ -162,9 +201,18 @@ static int cim_la_open(struct inode *inode, struct file *file)
        if (ret)
                return ret;
 
-       p = seq_open_tab(file, adap->params.cim_la_size / 8, 8 * sizeof(u32), 1,
-                        cfg & UPDBGLACAPTPCONLY_F ?
-                        cim_la_show_3in1 : cim_la_show);
+       if (is_t6(adap->params.chip)) {
+               /* + 1 to round up the integer division cim_la_size / 10 */
+               p = seq_open_tab(file, (adap->params.cim_la_size / 10) + 1,
+                                10 * sizeof(u32), 1,
+                                cfg & UPDBGLACAPTPCONLY_F ?
+                                       cim_la_show_pc_t6 : cim_la_show_t6);
+       } else {
+               p = seq_open_tab(file, adap->params.cim_la_size / 8,
+                                8 * sizeof(u32), 1,
+                                cfg & UPDBGLACAPTPCONLY_F ? cim_la_show_3in1 :
+                                                            cim_la_show);
+       }
        if (!p)
                return -ENOMEM;
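The divisor changes because a T6 CIM LA row spans 10 u32s instead of 8, and the '+ 1' rounds the truncating division up. With an assumed (illustrative) cim_la_size of 2048:

    /* T6:      2048 / 10 = 204 rows, remainder 8  -> allocate 204 + 1
     * pre-T6:  2048 / 8  = 256 rows, divides evenly, no rounding needed
     */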
 
@@ -2227,6 +2275,290 @@ static const struct file_operations blocked_fl_fops = {
        .llseek  = generic_file_llseek,
 };
 
+struct mem_desc {
+       unsigned int base;
+       unsigned int limit;
+       unsigned int idx;
+};
+
+static int mem_desc_cmp(const void *a, const void *b)
+{
+       return ((const struct mem_desc *)a)->base -
+              ((const struct mem_desc *)b)->base;
+}
+
+static void mem_region_show(struct seq_file *seq, const char *name,
+                           unsigned int from, unsigned int to)
+{
+       char buf[40];
+
+       string_get_size((u64)to - from + 1, 1, STRING_UNITS_2, buf,
+                       sizeof(buf));
+       seq_printf(seq, "%-15s %#x-%#x [%s]\n", name, from, to, buf);
+}
+
+static int meminfo_show(struct seq_file *seq, void *v)
+{
+       static const char * const memory[] = { "EDC0:", "EDC1:", "MC:",
+                                       "MC0:", "MC1:"};
+       static const char * const region[] = {
+               "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
+               "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
+               "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
+               "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
+               "RQUDP region:", "PBL region:", "TXPBL region:",
+               "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
+               "On-chip queues:"
+       };
+
+       int i, n;
+       u32 lo, hi, used, alloc;
+       struct mem_desc avail[4];
+       struct mem_desc mem[ARRAY_SIZE(region) + 3];      /* up to 3 holes */
+       struct mem_desc *md = mem;
+       struct adapter *adap = seq->private;
+
+       for (i = 0; i < ARRAY_SIZE(mem); i++) {
+               mem[i].limit = 0;
+               mem[i].idx = i;
+       }
+
+       /* Find and sort the populated memory ranges */
+       i = 0;
+       lo = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+       if (lo & EDRAM0_ENABLE_F) {
+               hi = t4_read_reg(adap, MA_EDRAM0_BAR_A);
+               avail[i].base = EDRAM0_BASE_G(hi) << 20;
+               avail[i].limit = avail[i].base + (EDRAM0_SIZE_G(hi) << 20);
+               avail[i].idx = 0;
+               i++;
+       }
+       if (lo & EDRAM1_ENABLE_F) {
+               hi = t4_read_reg(adap, MA_EDRAM1_BAR_A);
+               avail[i].base = EDRAM1_BASE_G(hi) << 20;
+               avail[i].limit = avail[i].base + (EDRAM1_SIZE_G(hi) << 20);
+               avail[i].idx = 1;
+               i++;
+       }
+
+       if (is_t5(adap->params.chip)) {
+               if (lo & EXT_MEM0_ENABLE_F) {
+                       hi = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+                       avail[i].base = EXT_MEM0_BASE_G(hi) << 20;
+                       avail[i].limit =
+                               avail[i].base + (EXT_MEM0_SIZE_G(hi) << 20);
+                       avail[i].idx = 3;
+                       i++;
+               }
+               if (lo & EXT_MEM1_ENABLE_F) {
+                       hi = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+                       avail[i].base = EXT_MEM1_BASE_G(hi) << 20;
+                       avail[i].limit =
+                               avail[i].base + (EXT_MEM1_SIZE_G(hi) << 20);
+                       avail[i].idx = 4;
+                       i++;
+               }
+       } else {
+               if (lo & EXT_MEM_ENABLE_F) {
+                       hi = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
+                       avail[i].base = EXT_MEM_BASE_G(hi) << 20;
+                       avail[i].limit =
+                               avail[i].base + (EXT_MEM_SIZE_G(hi) << 20);
+                       avail[i].idx = 2;
+                       i++;
+               }
+       }
+       if (!i)                                    /* no memory available */
+               return 0;
+       sort(avail, i, sizeof(struct mem_desc), mem_desc_cmp, NULL);
+
+       (md++)->base = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A);
+       (md++)->base = t4_read_reg(adap, SGE_IMSG_CTXT_BADDR_A);
+       (md++)->base = t4_read_reg(adap, SGE_FLM_CACHE_BADDR_A);
+       (md++)->base = t4_read_reg(adap, TP_CMM_TCB_BASE_A);
+       (md++)->base = t4_read_reg(adap, TP_CMM_MM_BASE_A);
+       (md++)->base = t4_read_reg(adap, TP_CMM_TIMER_BASE_A);
+       (md++)->base = t4_read_reg(adap, TP_CMM_MM_RX_FLST_BASE_A);
+       (md++)->base = t4_read_reg(adap, TP_CMM_MM_TX_FLST_BASE_A);
+       (md++)->base = t4_read_reg(adap, TP_CMM_MM_PS_FLST_BASE_A);
+
+       /* the next few have explicit upper bounds */
+       md->base = t4_read_reg(adap, TP_PMM_TX_BASE_A);
+       md->limit = md->base - 1 +
+                   t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A) *
+                   PMTXMAXPAGE_G(t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A));
+       md++;
+
+       md->base = t4_read_reg(adap, TP_PMM_RX_BASE_A);
+       md->limit = md->base - 1 +
+                   t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) *
+                   PMRXMAXPAGE_G(t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A));
+       md++;
+
+       if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
+               if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) {
+                       hi = t4_read_reg(adap, LE_DB_TID_HASHBASE_A) / 4;
+                       md->base = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
+               } else {
+                       hi = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
+                       md->base = t4_read_reg(adap,
+                                              LE_DB_HASH_TBL_BASE_ADDR_A);
+               }
+               md->limit = 0;
+       } else {
+               md->base = 0;
+               md->idx = ARRAY_SIZE(region);  /* hide it */
+       }
+       md++;
+
+#define ulp_region(reg) do { \
+       md->base = t4_read_reg(adap, ULP_ ## reg ## _LLIMIT_A);\
+       (md++)->limit = t4_read_reg(adap, ULP_ ## reg ## _ULIMIT_A); \
+} while (0)
+
+       ulp_region(RX_ISCSI);
+       ulp_region(RX_TDDP);
+       ulp_region(TX_TPT);
+       ulp_region(RX_STAG);
+       ulp_region(RX_RQ);
+       ulp_region(RX_RQUDP);
+       ulp_region(RX_PBL);
+       ulp_region(TX_PBL);
+#undef ulp_region
+       md->base = 0;
+       md->idx = ARRAY_SIZE(region);
+       if (!is_t4(adap->params.chip)) {
+               u32 size = 0;
+               u32 sge_ctrl = t4_read_reg(adap, SGE_CONTROL2_A);
+               u32 fifo_size = t4_read_reg(adap, SGE_DBVFIFO_SIZE_A);
+
+               if (is_t5(adap->params.chip)) {
+                       if (sge_ctrl & VFIFO_ENABLE_F)
+                               size = DBVFIFO_SIZE_G(fifo_size);
+               } else {
+                       size = T6_DBVFIFO_SIZE_G(fifo_size);
+               }
+
+               if (size) {
+                       md->base = BASEADDR_G(t4_read_reg(adap,
+                                       SGE_DBVFIFO_BADDR_A));
+                       md->limit = md->base + (size << 2) - 1;
+               }
+       }
+
+       md++;
+
+       md->base = t4_read_reg(adap, ULP_RX_CTX_BASE_A);
+       md->limit = 0;
+       md++;
+       md->base = t4_read_reg(adap, ULP_TX_ERR_TABLE_BASE_A);
+       md->limit = 0;
+       md++;
+
+       md->base = adap->vres.ocq.start;
+       if (adap->vres.ocq.size)
+               md->limit = md->base + adap->vres.ocq.size - 1;
+       else
+               md->idx = ARRAY_SIZE(region);  /* hide it */
+       md++;
+
+       /* add any address-space holes, there can be up to 3 */
+       for (n = 0; n < i - 1; n++)
+               if (avail[n].limit < avail[n + 1].base)
+                       (md++)->base = avail[n].limit;
+       if (avail[n].limit)
+               (md++)->base = avail[n].limit;
+
+       n = md - mem;
+       sort(mem, n, sizeof(struct mem_desc), mem_desc_cmp, NULL);
+
+       for (lo = 0; lo < i; lo++)
+               mem_region_show(seq, memory[avail[lo].idx], avail[lo].base,
+                               avail[lo].limit - 1);
+
+       seq_putc(seq, '\n');
+       for (i = 0; i < n; i++) {
+               if (mem[i].idx >= ARRAY_SIZE(region))
+                       continue;                        /* skip holes */
+               if (!mem[i].limit)
+                       mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
+               mem_region_show(seq, region[mem[i].idx], mem[i].base,
+                               mem[i].limit);
+       }
+
+       seq_putc(seq, '\n');
+       lo = t4_read_reg(adap, CIM_SDRAM_BASE_ADDR_A);
+       hi = t4_read_reg(adap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
+       mem_region_show(seq, "uP RAM:", lo, hi);
+
+       lo = t4_read_reg(adap, CIM_EXTMEM2_BASE_ADDR_A);
+       hi = t4_read_reg(adap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
+       mem_region_show(seq, "uP Extmem2:", lo, hi);
+
+       lo = t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A);
+       seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n",
+                  PMRXMAXPAGE_G(lo),
+                  t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) >> 10,
+                  (lo & PMRXNUMCHN_F) ? 2 : 1);
+
+       lo = t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A);
+       hi = t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A);
+       seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n",
+                  PMTXMAXPAGE_G(lo),
+                  hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
+                  hi >= (1 << 20) ? 'M' : 'K', 1 << PMTXNUMCHN_G(lo));
+       seq_printf(seq, "%u p-structs\n\n",
+                  t4_read_reg(adap, TP_CMM_MM_MAX_PSTRUCT_A));
+
+       for (i = 0; i < 4; i++) {
+               if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
+                       lo = t4_read_reg(adap, MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
+               else
+                       lo = t4_read_reg(adap, MPS_RX_PG_RSV0_A + i * 4);
+               if (is_t5(adap->params.chip)) {
+                       used = T5_USED_G(lo);
+                       alloc = T5_ALLOC_G(lo);
+               } else {
+                       used = USED_G(lo);
+                       alloc = ALLOC_G(lo);
+               }
+               /* For T6 these are MAC buffer groups */
+               seq_printf(seq, "Port %d using %u pages out of %u allocated\n",
+                          i, used, alloc);
+       }
+       for (i = 0; i < adap->params.arch.nchan; i++) {
+               if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
+                       lo = t4_read_reg(adap,
+                                        MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
+               else
+                       lo = t4_read_reg(adap, MPS_RX_PG_RSV4_A + i * 4);
+               if (is_t5(adap->params.chip)) {
+                       used = T5_USED_G(lo);
+                       alloc = T5_ALLOC_G(lo);
+               } else {
+                       used = USED_G(lo);
+                       alloc = ALLOC_G(lo);
+               }
+               /* For T6 these are MAC buffer groups */
+               seq_printf(seq,
+                          "Loopback %d using %u pages out of %u allocated\n",
+                          i, used, alloc);
+       }
+       return 0;
+}
+
+static int meminfo_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, meminfo_show, inode->i_private);
+}
+
+static const struct file_operations meminfo_fops = {
+       .owner   = THIS_MODULE,
+       .open    = meminfo_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = single_release,
+};
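meminfo_show() leans on a small generic technique: collect region base addresses, sort them with mem_desc_cmp(), then derive each missing limit from the next region's base (the last one extends to ~0), while entries given an out-of-range idx are hidden from the printout. A standalone userspace sketch of just that technique, with made-up region values:

    #include <stdio.h>
    #include <stdlib.h>

    struct mem_desc {
            unsigned int base;
            unsigned int limit;     /* 0 means "derive from next base" */
    };

    static int mem_desc_cmp(const void *a, const void *b)
    {
            return ((const struct mem_desc *)a)->base -
                   ((const struct mem_desc *)b)->base;
    }

    int main(void)
    {
            /* hypothetical regions, deliberately unsorted */
            struct mem_desc mem[] = {
                    { 0x4000, 0 }, { 0x0000, 0 },
                    { 0x9000, 0xafff }, { 0x1000, 0 },
            };
            int i, n = sizeof(mem) / sizeof(mem[0]);

            qsort(mem, n, sizeof(mem[0]), mem_desc_cmp);
            for (i = 0; i < n; i++) {
                    if (!mem[i].limit)
                            mem[i].limit = i < n - 1 ? mem[i + 1].base - 1
                                                     : ~0u;
                    printf("%#x-%#x\n", mem[i].base, mem[i].limit);
            }
            return 0;
    }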
 /* Add an array of Debug FS files.
  */
 void add_debugfs_files(struct adapter *adap,
@@ -2294,6 +2626,7 @@ int t4_setup_debugfs(struct adapter *adap)
                { "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
 #endif
                { "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 },
+               { "meminfo", &meminfo_fops, S_IRUSR, 0 },
        };
 
        /* Debug FS nodes common to all T5 and later adapters.
@@ -2340,6 +2673,8 @@ int t4_setup_debugfs(struct adapter *adap)
 
        de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
                                      &flash_debugfs_fops, adap->params.sf_size);
+       debugfs_create_bool("use_backdoor", S_IWUSR | S_IRUSR,
+                           adap->debugfs_root, &adap->use_bd);
 
        return 0;
 }
index 687acf71fa15e01e5886b5055f9bc9b8ccbc4929..5eedb98ff581a8c67dd8284a1cc2ad9fd3fbdd6e 100644 (file)
@@ -925,6 +925,20 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
        const struct firmware *fw;
        struct adapter *adap = netdev2adap(netdev);
        unsigned int mbox = PCIE_FW_MASTER_M + 1;
+       u32 pcie_fw;
+       unsigned int master;
+       u8 master_vld = 0;
+
+       pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+       master = PCIE_FW_MASTER_G(pcie_fw);
+       if (pcie_fw & PCIE_FW_MASTER_VLD_F)
+               master_vld = 1;
+       /* if csiostor is the master return */
+       if (master_vld && (master != adap->pf)) {
+               dev_warn(adap->pdev_dev,
+                        "cxgb4 driver needs to be loaded as MASTER to support FW flash\n");
+               return -EOPNOTSUPP;
+       }
 
        ef->data[sizeof(ef->data) - 1] = '\0';
        ret = request_firmware(&fw, ef->data, adap->pdev_dev);
index 351f3b1bf80025167c9afcc226252ec923a639b1..27e87b6baa455cc41c85510ce6a54d23d3cae8e7 100644 (file)
@@ -4551,6 +4551,32 @@ static void free_some_resources(struct adapter *adapter)
                   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
 #define SEGMENT_SIZE 128
 
+static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
+{
+       int ver, chip = 0;
+       u16 device_id;
+
+       /* Retrieve adapter's device ID */
+       pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
+       ver = device_id >> 12;
+       switch (ver) {
+       case CHELSIO_T4:
+               chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
+               break;
+       case CHELSIO_T5:
+               chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+               break;
+       case CHELSIO_T6:
+               chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+               break;
+       default:
+               dev_err(&pdev->dev, "Device %d is not supported\n",
+                       device_id);
+               return -EINVAL;
+       }
+       return chip;
+}
+
 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        int func, i, err, s_qpp, qpp, num_seg;
@@ -4558,6 +4584,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        bool highdma = false;
        struct adapter *adapter = NULL;
        void __iomem *regs;
+       u32 whoami, pl_rev;
+       enum chip_type chip;
 
        printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
 
@@ -4586,7 +4614,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto out_unmap_bar0;
 
        /* We control everything through one PF */
-       func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
+       whoami = readl(regs + PL_WHOAMI_A);
+       pl_rev = REV_G(readl(regs + PL_REV_A));
+       chip = get_chip_type(pdev, pl_rev);
+       func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
+               SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
        if (func != ent->driver_data) {
                iounmap(regs);
                pci_disable_device(pdev);
@@ -4757,7 +4789,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         */
        cfg_queues(adapter);
 
-       adapter->l2t = t4_init_l2t();
+       adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
        if (!adapter->l2t) {
                /* We tolerate a lack of L2T, giving up some functionality */
                dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
index 252efc29321f4e7c57a53e631b520ea8820f6900..ac27898c6ab0b249ad6ced9308b55cd40f883ece 100644 (file)
 #define VLAN_NONE 0xfff
 
 /* identifies sync vs async L2T_WRITE_REQs */
-#define F_SYNC_WR    (1 << 12)
-
-enum {
-       L2T_STATE_VALID,      /* entry is up to date */
-       L2T_STATE_STALE,      /* entry may be used but needs revalidation */
-       L2T_STATE_RESOLVING,  /* entry needs address resolution */
-       L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
-
-       /* when state is one of the below the entry is not hashed */
-       L2T_STATE_SWITCHING,  /* entry is being used by a switching filter */
-       L2T_STATE_UNUSED      /* entry not in use */
-};
+#define SYNC_WR_S    12
+#define SYNC_WR_V(x) ((x) << SYNC_WR_S)
+#define SYNC_WR_F    SYNC_WR_V(1)
 
 struct l2t_data {
+       unsigned int l2t_start;     /* start index of our piece of the L2T */
+       unsigned int l2t_size;      /* number of entries in l2tab */
        rwlock_t lock;
        atomic_t nfree;             /* number of free entries */
        struct l2t_entry *rover;    /* starting point for next allocation */
-       struct l2t_entry l2tab[L2T_SIZE];
+       struct l2t_entry l2tab[0];  /* MUST BE LAST */
 };
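The SYNC_WR_S/_V/_F trio replaces the bare F_SYNC_WR constant with the cxgb4 register-field convention: _S is the bit position, _V(x) shifts a value into place, and _F is the single-bit flag, i.e. SYNC_WR_V(1). Composing and testing then look like this sketch (handle_sync_write_reply() is a hypothetical stand-in):

    unsigned int field;

    field = l2t_idx | SYNC_WR_F;    /* compose: mark the tid as a sync write */
    if (field & SYNC_WR_F)          /* test: recover the flag on the reply */
            handle_sync_write_reply();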
 
 static inline unsigned int vlan_prio(const struct l2t_entry *e)
@@ -85,29 +78,36 @@ static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
 /*
  * To avoid having to check address families we do not allow v4 and v6
  * neighbors to be on the same hash chain.  We keep v4 entries in the first
- * half of available hash buckets and v6 in the second.
+ * half of available hash buckets and v6 in the second.  We need at least two
+ * entries in our L2T for this scheme to work.
  */
 enum {
-       L2T_SZ_HALF = L2T_SIZE / 2,
-       L2T_HASH_MASK = L2T_SZ_HALF - 1
+       L2T_MIN_HASH_BUCKETS = 2,
 };
 
-static inline unsigned int arp_hash(const u32 *key, int ifindex)
+static inline unsigned int arp_hash(struct l2t_data *d, const u32 *key,
+                                   int ifindex)
 {
-       return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
+       unsigned int l2t_size_half = d->l2t_size / 2;
+
+       return jhash_2words(*key, ifindex, 0) % l2t_size_half;
 }
 
-static inline unsigned int ipv6_hash(const u32 *key, int ifindex)
+static inline unsigned int ipv6_hash(struct l2t_data *d, const u32 *key,
+                                    int ifindex)
 {
+       unsigned int l2t_size_half = d->l2t_size / 2;
        u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
 
-       return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
+       return (l2t_size_half +
+               (jhash_2words(xor, ifindex, 0) % l2t_size_half));
 }
 
-static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex)
+static unsigned int addr_hash(struct l2t_data *d, const u32 *addr,
+                             int addr_len, int ifindex)
 {
-       return addr_len == 4 ? arp_hash(addr, ifindex) :
-                              ipv6_hash(addr, ifindex);
+       return addr_len == 4 ? arp_hash(d, addr, ifindex) :
+                              ipv6_hash(d, addr, ifindex);
 }
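Hashing IPv4 keys into [0, l2t_size/2) and IPv6 keys into [l2t_size/2, l2t_size) means a bucket's index alone implies the address family, so chains never mix families and lookups never compare across them; it also explains the new L2T_MIN_HASH_BUCKETS floor of two. A self-contained sketch of the scheme, using a stand-in mixer where the driver calls jhash_2words():

    #include <stdint.h>

    /* stand-in for jhash_2words(); any reasonable mixer illustrates it */
    static unsigned int mix(uint32_t key, uint32_t ifindex)
    {
            uint32_t h = key * 0x9e3779b1u ^ ifindex * 0x85ebca6bu;

            return h ^ (h >> 16);
    }

    /* v4 entries land in the lower half, v6 in the upper half */
    static unsigned int split_hash(unsigned int l2t_size, const uint32_t *addr,
                                   int addr_len, int ifindex)
    {
            unsigned int half = l2t_size / 2;

            if (addr_len == 4)      /* IPv4 key: a single word */
                    return mix(addr[0], ifindex) % half;

            /* IPv6 key: fold 128 bits, then offset into the upper half */
            return half + (mix(addr[0] ^ addr[1] ^ addr[2] ^ addr[3],
                               ifindex) % half);
    }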
 
 /*
@@ -139,6 +139,8 @@ static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
  */
 static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
 {
+       struct l2t_data *d = adap->l2t;
+       unsigned int l2t_idx = e->idx + d->l2t_start;
        struct sk_buff *skb;
        struct cpl_l2t_write_req *req;
 
@@ -150,10 +152,10 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
        INIT_TP_WR(req, 0);
 
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
-                                       e->idx | (sync ? F_SYNC_WR : 0) |
+                                       l2t_idx | (sync ? SYNC_WR_F : 0) |
                                        TID_QID_V(adap->sge.fw_evtq.abs_id)));
        req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
-       req->l2t_idx = htons(e->idx);
+       req->l2t_idx = htons(l2t_idx);
        req->vlan = htons(e->vlan);
        if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
                memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
@@ -190,18 +192,19 @@ static void send_pending(struct adapter *adap, struct l2t_entry *e)
  */
 void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
 {
+       struct l2t_data *d = adap->l2t;
        unsigned int tid = GET_TID(rpl);
-       unsigned int idx = tid & (L2T_SIZE - 1);
+       unsigned int l2t_idx = tid % L2T_SIZE;
 
        if (unlikely(rpl->status != CPL_ERR_NONE)) {
                dev_err(adap->pdev_dev,
                        "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
-                       rpl->status, idx);
+                       rpl->status, l2t_idx);
                return;
        }
 
-       if (tid & F_SYNC_WR) {
-               struct l2t_entry *e = &adap->l2t->l2tab[idx];
+       if (tid & SYNC_WR_F) {
+               struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start];
 
                spin_lock(&e->lock);
                if (e->state != L2T_STATE_SWITCHING) {
@@ -276,7 +279,7 @@ static struct l2t_entry *alloc_l2e(struct l2t_data *d)
                return NULL;
 
        /* there's definitely a free entry */
-       for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
+       for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
                if (atomic_read(&e->refcnt) == 0)
                        goto found;
 
@@ -368,7 +371,7 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
        int addr_len = neigh->tbl->key_len;
        u32 *addr = (u32 *)neigh->primary_key;
        int ifidx = neigh->dev->ifindex;
-       int hash = addr_hash(addr, addr_len, ifidx);
+       int hash = addr_hash(d, addr, addr_len, ifidx);
 
        if (neigh->dev->flags & IFF_LOOPBACK)
                lport = netdev2pinfo(physdev)->tx_chan + 4;
@@ -481,7 +484,7 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
        int addr_len = neigh->tbl->key_len;
        u32 *addr = (u32 *) neigh->primary_key;
        int ifidx = neigh->dev->ifindex;
-       int hash = addr_hash(addr, addr_len, ifidx);
+       int hash = addr_hash(d, addr, addr_len, ifidx);
 
        read_lock_bh(&d->lock);
        for (e = d->l2tab[hash].first; e; e = e->next)
@@ -554,20 +557,30 @@ int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
        return write_l2e(adap, e, 0);
 }
 
-struct l2t_data *t4_init_l2t(void)
+struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
 {
+       unsigned int l2t_size;
        int i;
        struct l2t_data *d;
 
-       d = t4_alloc_mem(sizeof(*d));
+       if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE)
+               return NULL;
+       l2t_size = l2t_end - l2t_start + 1;
+       if (l2t_size < L2T_MIN_HASH_BUCKETS)
+               return NULL;
+
+       d = t4_alloc_mem(sizeof(*d) + l2t_size * sizeof(struct l2t_entry));
        if (!d)
                return NULL;
 
+       d->l2t_start = l2t_start;
+       d->l2t_size = l2t_size;
+
        d->rover = d->l2tab;
-       atomic_set(&d->nfree, L2T_SIZE);
+       atomic_set(&d->nfree, l2t_size);
        rwlock_init(&d->lock);
 
-       for (i = 0; i < L2T_SIZE; ++i) {
+       for (i = 0; i < d->l2t_size; ++i) {
                d->l2tab[i].idx = i;
                d->l2tab[i].state = L2T_STATE_UNUSED;
                spin_lock_init(&d->l2tab[i].lock);
@@ -578,9 +591,9 @@ struct l2t_data *t4_init_l2t(void)
 
 static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
 {
-       struct l2t_entry *l2tab = seq->private;
+       struct l2t_data *d = seq->private;
 
-       return pos >= L2T_SIZE ? NULL : &l2tab[pos];
+       return pos >= d->l2t_size ? NULL : &d->l2tab[pos];
 }
 
 static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
@@ -620,6 +633,7 @@ static int l2t_seq_show(struct seq_file *seq, void *v)
                         "Ethernet address  VLAN/P LP State Users Port\n");
        else {
                char ip[60];
+               struct l2t_data *d = seq->private;
                struct l2t_entry *e = v;
 
                spin_lock_bh(&e->lock);
@@ -628,7 +642,7 @@ static int l2t_seq_show(struct seq_file *seq, void *v)
                else
                        sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
                seq_printf(seq, "%4u %-25s %17pM %4d %u %2u   %c   %5u %s\n",
-                          e->idx, ip, e->dmac,
+                          e->idx + d->l2t_start, ip, e->dmac,
                           e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
                           l2e_state(e), atomic_read(&e->refcnt),
                           e->neigh ? e->neigh->dev->name : "");
@@ -652,7 +666,7 @@ static int l2t_seq_open(struct inode *inode, struct file *file)
                struct adapter *adap = inode->i_private;
                struct seq_file *seq = file->private_data;
 
-               seq->private = adap->l2t->l2tab;
+               seq->private = adap->l2t;
        }
        return rc;
 }
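The move from a fixed l2tab[L2T_SIZE] array to a trailing l2tab[0] sized in t4_init_l2t() is the flexible-array-member pattern: a single allocation covers the header plus exactly l2t_size entries, keeping both contiguous. A minimal userspace rendering; C99 spells the member [] where the kernel code above uses the older [0] idiom:

    #include <stdlib.h>

    struct entry {
            unsigned int idx;
            int state;
    };

    struct table {
            unsigned int size;
            struct entry tab[];     /* flexible array member; MUST BE LAST */
    };

    static struct table *table_alloc(unsigned int n)
    {
            struct table *t = malloc(sizeof(*t) + n * sizeof(t->tab[0]));
            unsigned int i;

            if (!t)
                    return NULL;
            t->size = n;
            for (i = 0; i < n; i++) {
                    t->tab[i].idx = i;
                    t->tab[i].state = 0;
            }
            return t;
    }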
index a30126ce90cbabeaf50d7ed3fc5f1531c3831db9..b38dc526aad563a3b27b6b79e2e8d7c093b84ff0 100644 (file)
 #include <linux/if_ether.h>
 #include <linux/atomic.h>
 
+enum { L2T_SIZE = 4096 };     /* # of L2T entries */
+
+enum {
+       L2T_STATE_VALID,      /* entry is up to date */
+       L2T_STATE_STALE,      /* entry may be used but needs revalidation */
+       L2T_STATE_RESOLVING,  /* entry needs address resolution */
+       L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
+       L2T_STATE_NOARP,      /* Netdev down or removed */
+
+       /* when state is one of the below the entry is not hashed */
+       L2T_STATE_SWITCHING,  /* entry is being used by a switching filter */
+       L2T_STATE_UNUSED      /* entry not in use */
+};
+
 struct adapter;
 struct l2t_data;
 struct neighbour;
@@ -56,7 +70,7 @@ struct cpl_l2t_write_rpl;
  */
 struct l2t_entry {
        u16 state;                  /* entry state */
-       u16 idx;                    /* entry index */
+       u16 idx;                    /* entry index within in-memory table */
        u32 addr[4];                /* next hop IP or IPv6 address */
        int ifindex;                /* neighbor's net_device's ifindex */
        struct neighbour *neigh;    /* associated neighbour */
@@ -104,7 +118,7 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
 struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
 int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
                         u8 port, u8 *eth_addr);
-struct l2t_data *t4_init_l2t(void);
+struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end);
 void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
 
 extern const struct file_operations t4_l2t_fops;
index 942db078f33a6fa0332c627762695386f688aa07..d4248d74f5601b711c1d0c9d65923ec7acce627c 100644 (file)
@@ -1137,7 +1137,7 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
  */
 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-       u32 wr_mid;
+       u32 wr_mid, ctrl0;
        u64 cntrl, *end;
        int qidx, credits;
        unsigned int flits, ndesc;
@@ -1274,9 +1274,15 @@ out_free:        dev_kfree_skb_any(skb);
 #endif /* CONFIG_CHELSIO_T4_FCOE */
        }
 
-       cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
-                          TXPKT_INTF_V(pi->tx_chan) |
-                          TXPKT_PF_V(adap->pf));
+       ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
+               TXPKT_PF_V(adap->pf);
+#ifdef CONFIG_CHELSIO_T4_DCB
+       if (is_t4(adap->params.chip))
+               ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
+       else
+               ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
+#endif
+       cpl->ctrl0 = htonl(ctrl0);
        cpl->pack = htons(0);
        cpl->len = htons(skb->len);
        cpl->ctrl1 = cpu_to_be64(cntrl);
index 2b52aae7ec86d38b9e658e6e92d1f82ae3c6c879..91750ad580ae8fa66063d1ac7c4df9657b4e6ed1 100644 (file)
@@ -345,6 +345,43 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
                                       FW_CMD_MAX_TIMEOUT);
 }
 
+static int t4_edc_err_read(struct adapter *adap, int idx)
+{
+       u32 edc_ecc_err_addr_reg;
+       u32 rdata_reg;
+
+       if (is_t4(adap->params.chip)) {
+               CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
+               return 0;
+       }
+       if (idx != 0 && idx != 1) {
+               CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
+               return 0;
+       }
+
+       edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
+       rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);
+
+       CH_WARN(adap,
+               "edc%d err addr 0x%x: 0x%x.\n",
+               idx, edc_ecc_err_addr_reg,
+               t4_read_reg(adap, edc_ecc_err_addr_reg));
+       CH_WARN(adap,
+               "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
+               rdata_reg,
+               (unsigned long long)t4_read_reg64(adap, rdata_reg),
+               (unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
+               (unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
+               (unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
+               (unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
+               (unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
+               (unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
+               (unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
+               (unsigned long long)t4_read_reg64(adap, rdata_reg + 64));
+
+       return 0;
+}
+
 /**
  *     t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
  *     @adap: the adapter
@@ -1322,9 +1359,10 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
        };
 
        static const unsigned int t6_reg_ranges[] = {
-               0x1008, 0x114c,
+               0x1008, 0x1124,
+               0x1138, 0x114c,
                0x1180, 0x11b4,
-               0x11fc, 0x1250,
+               0x11fc, 0x1254,
                0x1280, 0x133c,
                0x1800, 0x18fc,
                0x3000, 0x302c,
@@ -1345,18 +1383,18 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x5a80, 0x5a9c,
                0x5b94, 0x5bfc,
                0x5c10, 0x5ec0,
-               0x5ec8, 0x5ec8,
+               0x5ec8, 0x5ecc,
                0x6000, 0x6040,
-               0x6058, 0x6154,
+               0x6058, 0x619c,
                0x7700, 0x7798,
                0x77c0, 0x7880,
                0x78cc, 0x78fc,
                0x7b00, 0x7c54,
                0x7d00, 0x7efc,
-               0x8dc0, 0x8de0,
+               0x8dc0, 0x8de4,
                0x8df8, 0x8e84,
                0x8ea0, 0x8f88,
-               0x8fb8, 0x911c,
+               0x8fb8, 0x9124,
                0x9400, 0x9470,
                0x9600, 0x971c,
                0x9800, 0x9808,
@@ -1371,20 +1409,21 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x9f00, 0x9f6c,
                0x9f80, 0xa020,
                0xd004, 0xd03c,
+               0xd100, 0xd118,
+               0xd200, 0xd31c,
                0xdfc0, 0xdfe0,
                0xe000, 0xf008,
                0x11000, 0x11014,
-               0x11048, 0x11110,
-               0x11118, 0x1117c,
-               0x11190, 0x11260,
+               0x11048, 0x1117c,
+               0x11190, 0x11270,
                0x11300, 0x1130c,
-               0x12000, 0x1205c,
+               0x12000, 0x1206c,
                0x19040, 0x1906c,
                0x19078, 0x19080,
                0x1908c, 0x19124,
                0x19150, 0x191b0,
                0x191d0, 0x191e8,
-               0x19238, 0x192b8,
+               0x19238, 0x192bc,
                0x193f8, 0x19474,
                0x19490, 0x194cc,
                0x194f0, 0x194f8,
@@ -1461,12 +1500,11 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x1ff00, 0x1ff84,
                0x1ffc0, 0x1ffc8,
                0x30000, 0x30070,
-               0x30100, 0x3015c,
-               0x30190, 0x301d0,
-               0x30200, 0x30318,
+               0x30100, 0x301d0,
+               0x30200, 0x30320,
                0x30400, 0x3052c,
                0x30540, 0x3061c,
-               0x30800, 0x3088c,
+               0x30800, 0x30890,
                0x308c0, 0x30908,
                0x30910, 0x309b8,
                0x30a00, 0x30a04,
@@ -1539,12 +1577,11 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x33c24, 0x33c50,
                0x33cf0, 0x33cfc,
                0x34000, 0x34070,
-               0x34100, 0x3415c,
-               0x34190, 0x341d0,
-               0x34200, 0x34318,
+               0x34100, 0x341d0,
+               0x34200, 0x34320,
                0x34400, 0x3452c,
                0x34540, 0x3461c,
-               0x34800, 0x3488c,
+               0x34800, 0x34890,
                0x348c0, 0x34908,
                0x34910, 0x349b8,
                0x34a00, 0x34a04,
@@ -3281,6 +3318,8 @@ static void mem_intr_handler(struct adapter *adapter, int idx)
        if (v & ECC_CE_INT_CAUSE_F) {
                u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
 
+               t4_edc_err_read(adapter, idx);
+
                t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
                if (printk_ratelimit())
                        dev_warn(adapter->pdev_dev,
@@ -3488,7 +3527,9 @@ int t4_slow_intr_handler(struct adapter *adapter)
 void t4_intr_enable(struct adapter *adapter)
 {
        u32 val = 0;
-       u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
+       u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+       u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+                       SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
 
        if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
                val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
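The same PF decode recurs in several places in this series; the pattern (an illustrative helper, not in the patch) is:

    /* On T4/T5 the PF number sits in PL_WHOAMI bits 10:8 (SOURCEPF);
     * T6 moved it up one bit to 11:9 (T6_SOURCEPF), hence the chip check.
     */
    static u32 whoami_to_pf(const struct adapter *adap, u32 whoami)
    {
    	return CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5 ?
    		SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
    }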
@@ -3513,7 +3554,9 @@ void t4_intr_enable(struct adapter *adapter)
  */
 void t4_intr_disable(struct adapter *adapter)
 {
-       u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
+       u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+       u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+                       SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
 
        t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
        t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
@@ -3687,6 +3730,11 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
        return 0;
 }
 
+static unsigned int t4_use_ldst(struct adapter *adap)
+{
+       return (adap->flags & FW_OK) && !adap->use_bd;
+}
+
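In words: TP PIO access goes through firmware LDST commands only when the firmware is alive and back-door register access has not been forced via use_bd; every caller below that previously tested FW_OK alone now goes through this helper.

    /* Decision table for t4_use_ldst() (sketch):
     *   FW_OK  use_bd  ->  access method
     *   yes    0           LDST via firmware mailbox
     *   yes    1           back-door TP_PIO indirect registers
     *   no     any         back-door TP_PIO indirect registers
     */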
 /**
  *     t4_fw_tp_pio_rw - Access TP PIO through LDST
  *     @adap: the adapter
@@ -3730,7 +3778,7 @@ static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
  */
 void t4_read_rss_key(struct adapter *adap, u32 *key)
 {
-       if (adap->flags & FW_OK)
+       if (t4_use_ldst(adap))
                t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1);
        else
                t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
@@ -3760,7 +3808,7 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
            (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
                rss_key_addr_cnt = 32;
 
-       if (adap->flags & FW_OK)
+       if (t4_use_ldst(adap))
                t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0);
        else
                t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
@@ -3789,7 +3837,7 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
                           u32 *valp)
 {
-       if (adapter->flags & FW_OK)
+       if (t4_use_ldst(adapter))
                t4_fw_tp_pio_rw(adapter, valp, 1,
                                TP_RSS_PF0_CONFIG_A + index, 1);
        else
@@ -3829,7 +3877,7 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
 
        /* Grab the VFL/VFH values ...
         */
-       if (adapter->flags & FW_OK) {
+       if (t4_use_ldst(adapter)) {
                t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1);
                t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1);
        } else {
@@ -3850,7 +3898,7 @@ u32 t4_read_rss_pf_map(struct adapter *adapter)
 {
        u32 pfmap;
 
-       if (adapter->flags & FW_OK)
+       if (t4_use_ldst(adapter))
                t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1);
        else
                t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
@@ -3868,7 +3916,7 @@ u32 t4_read_rss_pf_mask(struct adapter *adapter)
 {
        u32 pfmask;
 
-       if (adapter->flags & FW_OK)
+       if (t4_use_ldst(adapter))
                t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1);
        else
                t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
@@ -3924,43 +3972,25 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
  */
 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
 {
-       /* T6 and later has 2 channels */
-       if (adap->params.arch.nchan == NCHAN) {
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->mac_in_errs, 12, TP_MIB_MAC_IN_ERR_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->tnl_cong_drops, 8,
-                                TP_MIB_TNL_CNG_DROP_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->tnl_tx_drops, 4,
-                                TP_MIB_TNL_DROP_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->ofld_vlan_drops, 4,
-                                TP_MIB_OFD_VLN_DROP_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->tcp6_in_errs, 4,
-                                TP_MIB_TCP_V6IN_ERR_0_A);
-       } else {
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->mac_in_errs, 2, TP_MIB_MAC_IN_ERR_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->hdr_in_errs, 2, TP_MIB_HDR_IN_ERR_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->tcp_in_errs, 2, TP_MIB_TCP_IN_ERR_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->tnl_cong_drops, 2,
-                                TP_MIB_TNL_CNG_DROP_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->ofld_chan_drops, 2,
-                                TP_MIB_OFD_CHN_DROP_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->tnl_tx_drops, 2, TP_MIB_TNL_DROP_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->ofld_vlan_drops, 2,
-                                TP_MIB_OFD_VLN_DROP_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->tcp6_in_errs, 2, TP_MIB_TCP_V6IN_ERR_0_A);
-       }
+       int nchan = adap->params.arch.nchan;
+
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                        st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A);
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                        st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A);
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                        st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A);
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                        st->tnl_cong_drops, nchan, TP_MIB_TNL_CNG_DROP_0_A);
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                        st->ofld_chan_drops, nchan, TP_MIB_OFD_CHN_DROP_0_A);
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                        st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A);
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                        st->ofld_vlan_drops, nchan, TP_MIB_OFD_VLN_DROP_0_A);
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                        st->tcp6_in_errs, nchan, TP_MIB_TCP_V6IN_ERR_0_A);
+
        t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
                         &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
 }
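A sketch of what the unified loop reads per chip (nchan comes from adap->params.arch.nchan: 4 on T4/T5, 2 on T6, per the comment the patch removes):

    /* e.g. on T6 (nchan == 2) the first call fills st->mac_in_errs[0..1]
     * from TP_MIB_MAC_IN_ERR_0_A and the following MIB index, while a
     * T4/T5 part (nchan == 4) fills all four per-channel counters.
     */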
@@ -3974,16 +4004,13 @@ void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
  */
 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
 {
-       /* T6 and later has 2 channels */
-       if (adap->params.arch.nchan == NCHAN) {
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
-                                8, TP_MIB_CPL_IN_REQ_0_A);
-       } else {
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
-                                2, TP_MIB_CPL_IN_REQ_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
-                                2, TP_MIB_CPL_OUT_RSP_0_A);
-       }
+       int nchan = adap->params.arch.nchan;
+
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
+                        nchan, TP_MIB_CPL_IN_REQ_0_A);
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
+                        nchan, TP_MIB_CPL_OUT_RSP_0_A);
 }
 
 /**
@@ -6294,7 +6321,7 @@ int t4_init_tp_params(struct adapter *adap)
        /* Cache the adapter's Compressed Filter Mode and global Ingress
         * Configuration.
         */
-       if (adap->flags & FW_OK) {
+       if (t4_use_ldst(adap)) {
                t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1,
                                TP_VLAN_PRI_MAP_A, 1);
                t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1,
index c8488f430d197337d7fb81d62689e2c12c583292..640369df8b3a3cb155eb35c9d7a08009d0716da2 100644 (file)
@@ -47,7 +47,6 @@ enum {
        TCB_SIZE       = 128,   /* TCB size */
        NMTUS          = 16,    /* size of MTU table */
        NCCTRL_WIN     = 32,    /* # of congestion control windows */
-       L2T_SIZE       = 4096,  /* # of L2T entries */
        PM_NSTATS      = 5,     /* # of PM stats */
        MBOX_LEN       = 64,    /* mailbox size in bytes */
        TRACE_LEN      = 112,   /* length of trace data and mask */
index 132cb8fc0bf7167703bab62109e384281ba33836..b99144afd4ecc8958961acf2673c53bdcd8fa8df 100644 (file)
@@ -660,6 +660,9 @@ struct cpl_tx_pkt {
 #define TXPKT_OVLAN_IDX_S    12
 #define TXPKT_OVLAN_IDX_V(x) ((x) << TXPKT_OVLAN_IDX_S)
 
+#define TXPKT_T5_OVLAN_IDX_S   12
+#define TXPKT_T5_OVLAN_IDX_V(x)        ((x) << TXPKT_T5_OVLAN_IDX_S)
+
 #define TXPKT_INTF_S    16
 #define TXPKT_INTF_V(x) ((x) << TXPKT_INTF_S)
 
index d7ca106927b0d93ee68480c2e45c29879fe9e510..8353a6cbfcc21edd2dde363fafd06b202611cae4 100644 (file)
@@ -142,6 +142,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
        CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */
        CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */
        CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
+       CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */
+       CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */
        CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
        CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
        CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
@@ -155,6 +157,22 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
        CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */
        CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */
        CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */
+
+       /* T6 adapters:
+        */
+       CH_PCI_ID_TABLE_FENTRY(0x6001),
+       CH_PCI_ID_TABLE_FENTRY(0x6002),
+       CH_PCI_ID_TABLE_FENTRY(0x6003),
+       CH_PCI_ID_TABLE_FENTRY(0x6004),
+       CH_PCI_ID_TABLE_FENTRY(0x6005),
+       CH_PCI_ID_TABLE_FENTRY(0x6006),
+       CH_PCI_ID_TABLE_FENTRY(0x6007),
+       CH_PCI_ID_TABLE_FENTRY(0x6009),
+       CH_PCI_ID_TABLE_FENTRY(0x600d),
+       CH_PCI_ID_TABLE_FENTRY(0x6010),
+       CH_PCI_ID_TABLE_FENTRY(0x6011),
+       CH_PCI_ID_TABLE_FENTRY(0x6014),
+       CH_PCI_ID_TABLE_FENTRY(0x6015),
 CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
 
 #endif /* __T4_PCI_ID_TBL_H__ */
index 375a825573b0edb546fda908606fc1f6c6367212..e444dc4ebbd8fd11ec177a3f7d90d9d8930b18fb 100644 (file)
 #define  INGPACKBOUNDARY_G(x)  (((x) >> INGPACKBOUNDARY_S) \
                                 & INGPACKBOUNDARY_M)
 
+#define VFIFO_ENABLE_S    10
+#define VFIFO_ENABLE_V(x) ((x) << VFIFO_ENABLE_S)
+#define VFIFO_ENABLE_F    VFIFO_ENABLE_V(1U)
+
+#define SGE_DBVFIFO_BADDR_A 0x1138
+
+#define DBVFIFO_SIZE_S    6
+#define DBVFIFO_SIZE_M    0xfffU
+#define DBVFIFO_SIZE_G(x) (((x) >> DBVFIFO_SIZE_S) & DBVFIFO_SIZE_M)
+
+#define T6_DBVFIFO_SIZE_S    0
+#define T6_DBVFIFO_SIZE_M    0x1fffU
+#define T6_DBVFIFO_SIZE_G(x) (((x) >> T6_DBVFIFO_SIZE_S) & T6_DBVFIFO_SIZE_M)
+
 #define GLOBALENABLE_S    0
 #define GLOBALENABLE_V(x) ((x) << GLOBALENABLE_S)
 #define GLOBALENABLE_F    GLOBALENABLE_V(1U)
 #define SGE_FL_BUFFER_SIZE7_A 0x1060
 #define SGE_FL_BUFFER_SIZE8_A 0x1064
 
+#define SGE_IMSG_CTXT_BADDR_A 0x1088
+#define SGE_FLM_CACHE_BADDR_A 0x108c
 #define SGE_INGRESS_RX_THRESHOLD_A 0x10a0
 
 #define THRESHOLD_0_S    24
 #define EGRTHRESHOLDPACKING_G(x) \
        (((x) >> EGRTHRESHOLDPACKING_S) & EGRTHRESHOLDPACKING_M)
 
+#define T6_EGRTHRESHOLDPACKING_S    16
+#define T6_EGRTHRESHOLDPACKING_M    0xffU
+#define T6_EGRTHRESHOLDPACKING_G(x) \
+       (((x) >> T6_EGRTHRESHOLDPACKING_S) & T6_EGRTHRESHOLDPACKING_M)
+
 #define SGE_TIMESTAMP_LO_A 0x1098
 #define SGE_TIMESTAMP_HI_A 0x109c
 
 #define TSVAL_G(x) (((x) >> TSVAL_S) & TSVAL_M)
 
 #define SGE_DBFIFO_STATUS_A 0x10a4
+#define SGE_DBVFIFO_SIZE_A 0x113c
 
 #define HP_INT_THRESH_S    28
 #define HP_INT_THRESH_M    0xfU
 /* registers for module MA */
 #define MA_EDRAM0_BAR_A 0x77c0
 
+#define EDRAM0_BASE_S    16
+#define EDRAM0_BASE_M    0xfffU
+#define EDRAM0_BASE_G(x) (((x) >> EDRAM0_BASE_S) & EDRAM0_BASE_M)
+
 #define EDRAM0_SIZE_S    0
 #define EDRAM0_SIZE_M    0xfffU
 #define EDRAM0_SIZE_V(x) ((x) << EDRAM0_SIZE_S)
 
 #define MA_EDRAM1_BAR_A 0x77c4
 
+#define EDRAM1_BASE_S    16
+#define EDRAM1_BASE_M    0xfffU
+#define EDRAM1_BASE_G(x) (((x) >> EDRAM1_BASE_S) & EDRAM1_BASE_M)
+
 #define EDRAM1_SIZE_S    0
 #define EDRAM1_SIZE_M    0xfffU
 #define EDRAM1_SIZE_V(x) ((x) << EDRAM1_SIZE_S)
 
 #define MA_EXT_MEMORY_BAR_A 0x77c8
 
+#define EXT_MEM_BASE_S    16
+#define EXT_MEM_BASE_M    0xfffU
+#define EXT_MEM_BASE_V(x) ((x) << EXT_MEM_BASE_S)
+#define EXT_MEM_BASE_G(x) (((x) >> EXT_MEM_BASE_S) & EXT_MEM_BASE_M)
+
 #define EXT_MEM_SIZE_S    0
 #define EXT_MEM_SIZE_M    0xfffU
 #define EXT_MEM_SIZE_V(x) ((x) << EXT_MEM_SIZE_S)
 
 #define MA_EXT_MEMORY1_BAR_A 0x7808
 
+#define EXT_MEM1_BASE_S    16
+#define EXT_MEM1_BASE_M    0xfffU
+#define EXT_MEM1_BASE_G(x) (((x) >> EXT_MEM1_BASE_S) & EXT_MEM1_BASE_M)
+
 #define EXT_MEM1_SIZE_S    0
 #define EXT_MEM1_SIZE_M    0xfffU
 #define EXT_MEM1_SIZE_V(x) ((x) << EXT_MEM1_SIZE_S)
 
 #define MA_EXT_MEMORY0_BAR_A 0x77c8
 
+#define EXT_MEM0_BASE_S    16
+#define EXT_MEM0_BASE_M    0xfffU
+#define EXT_MEM0_BASE_G(x) (((x) >> EXT_MEM0_BASE_S) & EXT_MEM0_BASE_M)
+
 #define EXT_MEM0_SIZE_S    0
 #define EXT_MEM0_SIZE_M    0xfffU
 #define EXT_MEM0_SIZE_V(x) ((x) << EXT_MEM0_SIZE_S)
 
 /* registers for module CIM */
 #define CIM_BOOT_CFG_A 0x7b00
+#define CIM_SDRAM_BASE_ADDR_A 0x7b14
+#define CIM_SDRAM_ADDR_SIZE_A 0x7b18
+#define CIM_EXTMEM2_BASE_ADDR_A 0x7b1c
+#define CIM_EXTMEM2_ADDR_SIZE_A 0x7b20
 #define CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A 0x290
 
 #define  BOOTADDR_M    0xffffff00U
 #define TP_OUT_CONFIG_A                0x7d04
 #define TP_GLOBAL_CONFIG_A     0x7d08
 
+#define TP_CMM_TCB_BASE_A 0x7d10
+#define TP_CMM_MM_BASE_A 0x7d14
+#define TP_CMM_TIMER_BASE_A 0x7d18
+#define TP_PMM_TX_BASE_A 0x7d20
+#define TP_PMM_RX_BASE_A 0x7d28
+#define TP_PMM_RX_PAGE_SIZE_A 0x7d2c
+#define TP_PMM_RX_MAX_PAGE_A 0x7d30
+#define TP_PMM_TX_PAGE_SIZE_A 0x7d34
+#define TP_PMM_TX_MAX_PAGE_A 0x7d38
+#define TP_CMM_MM_MAX_PSTRUCT_A 0x7e6c
+
+#define PMRXNUMCHN_S    31
+#define PMRXNUMCHN_V(x) ((x) << PMRXNUMCHN_S)
+#define PMRXNUMCHN_F    PMRXNUMCHN_V(1U)
+
+#define PMTXNUMCHN_S    30
+#define PMTXNUMCHN_M    0x3U
+#define PMTXNUMCHN_G(x) (((x) >> PMTXNUMCHN_S) & PMTXNUMCHN_M)
+
+#define PMTXMAXPAGE_S    0
+#define PMTXMAXPAGE_M    0x1fffffU
+#define PMTXMAXPAGE_G(x) (((x) >> PMTXMAXPAGE_S) & PMTXMAXPAGE_M)
+
+#define PMRXMAXPAGE_S    0
+#define PMRXMAXPAGE_M    0x1fffffU
+#define PMRXMAXPAGE_G(x) (((x) >> PMRXMAXPAGE_S) & PMRXMAXPAGE_M)
+
 #define DBGLAMODE_S    14
 #define DBGLAMODE_M    0x3U
 #define DBGLAMODE_G(x) (((x) >> DBGLAMODE_S) & DBGLAMODE_M)
 #define MTUVALUE_G(x) (((x) >> MTUVALUE_S) & MTUVALUE_M)
 
 #define TP_RSS_LKP_TABLE_A     0x7dec
+#define TP_CMM_MM_RX_FLST_BASE_A 0x7e60
+#define TP_CMM_MM_TX_FLST_BASE_A 0x7e64
+#define TP_CMM_MM_PS_FLST_BASE_A 0x7e68
 
 #define LKPTBLROWVLD_S    31
 #define LKPTBLROWVLD_V(x) ((x) << LKPTBLROWVLD_S)
 #define TP_MIB_RQE_DFR_PKT_A   0x64
 
 #define ULP_TX_INT_CAUSE_A     0x8dcc
+#define ULP_TX_TPT_LLIMIT_A    0x8dd4
+#define ULP_TX_TPT_ULIMIT_A    0x8dd8
+#define ULP_TX_PBL_LLIMIT_A    0x8ddc
+#define ULP_TX_PBL_ULIMIT_A    0x8de0
+#define ULP_TX_ERR_TABLE_BASE_A 0x8e04
 
 #define PBL_BOUND_ERR_CH3_S    31
 #define PBL_BOUND_ERR_CH3_V(x) ((x) << PBL_BOUND_ERR_CH3_S)
 #define MATCHSRAM_V(x) ((x) << MATCHSRAM_S)
 #define MATCHSRAM_F    MATCHSRAM_V(1U)
 
+#define MPS_RX_PG_RSV0_A 0x11010
+#define MPS_RX_PG_RSV4_A 0x11020
 #define MPS_RX_PERR_INT_CAUSE_A 0x11074
+#define MPS_RX_MAC_BG_PG_CNT0_A 0x11208
+#define MPS_RX_LPBK_BG_PG_CNT0_A 0x11218
 
 #define MPS_CLS_TCAM_Y_L_A 0xf000
 #define MPS_CLS_TCAM_DATA0_A 0xf000
 #define MPS_CLS_TCAM_DATA1_A 0xf004
 
+#define USED_S    16
+#define USED_M    0x7ffU
+#define USED_G(x) (((x) >> USED_S) & USED_M)
+
+#define ALLOC_S    0
+#define ALLOC_M    0x7ffU
+#define ALLOC_G(x) (((x) >> ALLOC_S) & ALLOC_M)
+
+#define T5_USED_S    16
+#define T5_USED_M    0xfffU
+#define T5_USED_G(x) (((x) >> T5_USED_S) & T5_USED_M)
+
+#define T5_ALLOC_S    0
+#define T5_ALLOC_M    0xfffU
+#define T5_ALLOC_G(x) (((x) >> T5_ALLOC_S) & T5_ALLOC_M)
+
 #define DMACH_S    0
 #define DMACH_M    0xffffU
 #define DMACH_G(x) (((x) >> DMACH_S) & DMACH_M)
 #define SLVFIFOPARINT_F    SLVFIFOPARINT_V(1U)
 
 #define ULP_RX_INT_CAUSE_A 0x19158
+#define ULP_RX_ISCSI_LLIMIT_A 0x1915c
+#define ULP_RX_ISCSI_ULIMIT_A 0x19160
 #define ULP_RX_ISCSI_TAGMASK_A 0x19164
 #define ULP_RX_ISCSI_PSZ_A 0x19168
+#define ULP_RX_TDDP_LLIMIT_A 0x1916c
+#define ULP_RX_TDDP_ULIMIT_A 0x19170
+#define ULP_RX_STAG_LLIMIT_A 0x1917c
+#define ULP_RX_STAG_ULIMIT_A 0x19180
+#define ULP_RX_RQ_LLIMIT_A 0x19184
+#define ULP_RX_RQ_ULIMIT_A 0x19188
+#define ULP_RX_PBL_LLIMIT_A 0x1918c
+#define ULP_RX_PBL_ULIMIT_A 0x19190
+#define ULP_RX_CTX_BASE_A 0x19194
+#define ULP_RX_RQUDP_LLIMIT_A 0x191a4
+#define ULP_RX_RQUDP_ULIMIT_A 0x191a8
 #define ULP_RX_LA_CTL_A 0x1923c
 #define ULP_RX_LA_RDPTR_A 0x19240
 #define ULP_RX_LA_RDDATA_A 0x19244
 #define SOURCEPF_M    0x7U
 #define SOURCEPF_G(x) (((x) >> SOURCEPF_S) & SOURCEPF_M)
 
+#define T6_SOURCEPF_S    9
+#define T6_SOURCEPF_M    0x7U
+#define T6_SOURCEPF_G(x) (((x) >> T6_SOURCEPF_S) & T6_SOURCEPF_M)
+
 #define PL_INT_CAUSE_A 0x1940c
 
 #define ULP_TX_S    27
 #define T6_LIPMISS_V(x) ((x) << T6_LIPMISS_S)
 #define T6_LIPMISS_F    T6_LIPMISS_V(1U)
 
+#define LE_DB_CONFIG_A 0x19c04
+#define LE_DB_HASH_TID_BASE_A 0x19c30
+#define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30
 #define LE_DB_INT_CAUSE_A 0x19c3c
+#define LE_DB_TID_HASHBASE_A 0x19df8
+
+#define HASHEN_S    20
+#define HASHEN_V(x) ((x) << HASHEN_S)
+#define HASHEN_F    HASHEN_V(1U)
 
 #define REQQPARERR_S    16
 #define REQQPARERR_V(x) ((x) << REQQPARERR_S)
 #define LIP0_V(x) ((x) << LIP0_S)
 #define LIP0_F    LIP0_V(1U)
 
+#define BASEADDR_S    3
+#define BASEADDR_M    0x1fffffffU
+#define BASEADDR_G(x) (((x) >> BASEADDR_S) & BASEADDR_M)
+
 #define TCAMINTPERR_S    13
 #define TCAMINTPERR_V(x) ((x) << TCAMINTPERR_S)
 #define TCAMINTPERR_F    TCAMINTPERR_V(1U)
 #define EDC_H_BIST_DATA_PATTERN_A      0x50010
 #define EDC_H_BIST_STATUS_RDATA_A      0x50028
 
+#define EDC_H_ECC_ERR_ADDR_A           0x50084
 #define EDC_T51_BASE_ADDR              0x50800
 
-#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
-#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
+#define EDC_T5_STRIDE (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
+#define EDC_T5_REG(reg, idx) (reg + EDC_T5_STRIDE * idx)
 
 #define PL_VF_REV_A 0x4
 #define PL_VF_WHOAMI_A 0x0
index ad53e5ad2acd05afa1b94c09f7e2c1be8f4599a6..fa3786a9d30ea95223ab078da5efce42530d81f2 100644 (file)
@@ -1898,7 +1898,10 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
                rspq->unhandled_irqs++;
 
        val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
-       if (is_t4(rspq->adapter->params.chip)) {
+       /* If we don't have access to the new User GTS (T5+), use the old
+        * doorbell mechanism; otherwise use the new BAR2 mechanism.
+        */
+       if (unlikely(!rspq->bar2_addr)) {
                t4_write_reg(rspq->adapter,
                             T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
                             val | INGRESSQID_V((u32)rspq->cntxt_id));
@@ -1998,10 +2001,13 @@ static unsigned int process_intrq(struct adapter *adapter)
        }
 
        val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
-       if (is_t4(adapter->params.chip))
+       /* If we don't have access to the new User GTS (T5+), use the old
+        * doorbell mechanism; otherwise use the new BAR2 mechanism.
+        */
+       if (unlikely(!intrq->bar2_addr)) {
                t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
                             val | INGRESSQID_V(intrq->cntxt_id));
-       else {
+       } else {
                writel(val | INGRESSQID_V(intrq->bar2_qid),
                       intrq->bar2_addr + SGE_UDB_GTS);
                wmb();
@@ -2662,8 +2668,22 @@ int t4vf_sge_init(struct adapter *adapter)
         * give it more Free List entries.  (Note that the SGE's Egress
         * Congestion Threshold is in units of 2 Free List pointers.)
         */
-       s->fl_starve_thres
-               = EGRTHRESHOLD_G(sge_params->sge_congestion_control)*2 + 1;
+       switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
+       case CHELSIO_T4:
+               s->fl_starve_thres =
+                  EGRTHRESHOLD_G(sge_params->sge_congestion_control);
+               break;
+       case CHELSIO_T5:
+               s->fl_starve_thres =
+                  EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
+               break;
+       case CHELSIO_T6:
+       default:
+               s->fl_starve_thres =
+                  T6_EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
+               break;
+       }
+       s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
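Worked example for the final conversion (the SGE egress congestion threshold is in units of 2 free-list pointers, per the comment above):

    /* A decoded threshold field of 16 becomes 16 * 2 + 1 = 33 free-list
     * entries; only the field location differs per chip (EGRTHRESHOLD on
     * T4, EGRTHRESHOLDPACKING on T5, T6_EGRTHRESHOLDPACKING on T6).
     */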
 
        /*
         * Set up tasklet timers.
index 0db6dc9e9ed25f41273f9767ab470033be40ec5e..63dd5fdac5b91a5ba0186961a395d67f2c5cc7b2 100644 (file)
@@ -619,7 +619,8 @@ int t4vf_get_sge_params(struct adapter *adapter)
                 */
                whoami = t4_read_reg(adapter,
                                     T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
-               pf = SOURCEPF_G(whoami);
+               pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+                       SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
 
                s_hps = (HOSTPAGESIZEPF0_S +
                         (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
index f3f1601a76f37ffe7886da67427f6b79e8435321..f44a39c40642c147e03d79d2982031300e437d29 100644 (file)
@@ -224,7 +224,8 @@ static int enic_get_coalesce(struct net_device *netdev,
        struct enic *enic = netdev_priv(netdev);
        struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
 
-       ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
+       if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
+               ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
        ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
        if (rxcoal->use_adaptive_rx_coalesce)
                ecmd->use_adaptive_rx_coalesce = 1;
@@ -234,6 +235,53 @@ static int enic_get_coalesce(struct net_device *netdev,
        return 0;
 }
 
+static int enic_coalesce_valid(struct enic *enic,
+                              struct ethtool_coalesce *ec)
+{
+       u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
+       u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max,
+                                          ec->rx_coalesce_usecs_high);
+       u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
+                                         ec->rx_coalesce_usecs_low);
+
+       if (ec->rx_max_coalesced_frames         ||
+           ec->rx_coalesce_usecs_irq           ||
+           ec->rx_max_coalesced_frames_irq     ||
+           ec->tx_max_coalesced_frames         ||
+           ec->tx_coalesce_usecs_irq           ||
+           ec->tx_max_coalesced_frames_irq     ||
+           ec->stats_block_coalesce_usecs      ||
+           ec->use_adaptive_tx_coalesce        ||
+           ec->pkt_rate_low                    ||
+           ec->rx_max_coalesced_frames_low     ||
+           ec->tx_coalesce_usecs_low           ||
+           ec->tx_max_coalesced_frames_low     ||
+           ec->pkt_rate_high                   ||
+           ec->rx_max_coalesced_frames_high    ||
+           ec->tx_coalesce_usecs_high          ||
+           ec->tx_max_coalesced_frames_high    ||
+           ec->rate_sample_interval)
+               return -EINVAL;
+
+       if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
+           ec->tx_coalesce_usecs)
+               return -EINVAL;
+
+       if ((ec->tx_coalesce_usecs > coalesce_usecs_max)        ||
+           (ec->rx_coalesce_usecs > coalesce_usecs_max)        ||
+           (ec->rx_coalesce_usecs_low > coalesce_usecs_max)    ||
+           (ec->rx_coalesce_usecs_high > coalesce_usecs_max))
+               netdev_info(enic->netdev, "ethtool_set_coalesce: adapter supports max coalesce value of %d. Setting max value.\n",
+                           coalesce_usecs_max);
+
+       if (ec->rx_coalesce_usecs_high &&
+           (rx_coalesce_usecs_high <
+            rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
+               return -EINVAL;
+
+       return 0;
+}
+
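Note the two failure modes the helper distinguishes (inferred from the code above): unsupported ethtool knobs are rejected outright, while merely out-of-range microsecond values only log an informational message and are clamped later.

    /* e.g. ethtool -C ethX rx-usecs 500 on an adapter whose
     * vnic_dev_get_intr_coal_timer_max() is 300 passes validation, logs
     * the message, and enic_set_coalesce() clamps via
     * min_t(u32, 500, 300) == 300.
     */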
 static int enic_set_coalesce(struct net_device *netdev,
        struct ethtool_coalesce *ecmd)
 {
@@ -244,8 +292,12 @@ static int enic_set_coalesce(struct net_device *netdev,
        u32 rx_coalesce_usecs_high;
        u32 coalesce_usecs_max;
        unsigned int i, intr;
+       int ret;
        struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
 
+       ret = enic_coalesce_valid(enic, ecmd);
+       if (ret)
+               return ret;
        coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
        tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
                                  coalesce_usecs_max);
@@ -257,59 +309,24 @@ static int enic_set_coalesce(struct net_device *netdev,
        rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
                                       coalesce_usecs_max);
 
-       switch (vnic_dev_get_intr_mode(enic->vdev)) {
-       case VNIC_DEV_INTR_MODE_INTX:
-               if (tx_coalesce_usecs != rx_coalesce_usecs)
-                       return -EINVAL;
-               if (ecmd->use_adaptive_rx_coalesce      ||
-                   ecmd->rx_coalesce_usecs_low         ||
-                   ecmd->rx_coalesce_usecs_high)
-                       return -EINVAL;
-
-               intr = enic_legacy_io_intr();
-               vnic_intr_coalescing_timer_set(&enic->intr[intr],
-                       tx_coalesce_usecs);
-               break;
-       case VNIC_DEV_INTR_MODE_MSI:
-               if (tx_coalesce_usecs != rx_coalesce_usecs)
-                       return -EINVAL;
-               if (ecmd->use_adaptive_rx_coalesce      ||
-                   ecmd->rx_coalesce_usecs_low         ||
-                   ecmd->rx_coalesce_usecs_high)
-                       return -EINVAL;
-
-               vnic_intr_coalescing_timer_set(&enic->intr[0],
-                       tx_coalesce_usecs);
-               break;
-       case VNIC_DEV_INTR_MODE_MSIX:
-               if (ecmd->rx_coalesce_usecs_high &&
-                   (rx_coalesce_usecs_high <
-                    rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
-                               return -EINVAL;
-
+       if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
                for (i = 0; i < enic->wq_count; i++) {
                        intr = enic_msix_wq_intr(enic, i);
                        vnic_intr_coalescing_timer_set(&enic->intr[intr],
-                               tx_coalesce_usecs);
-               }
-
-               rxcoal->use_adaptive_rx_coalesce =
-                                       !!ecmd->use_adaptive_rx_coalesce;
-               if (!rxcoal->use_adaptive_rx_coalesce)
-                       enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
-
-               if (ecmd->rx_coalesce_usecs_high) {
-                       rxcoal->range_end = rx_coalesce_usecs_high;
-                       rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
-                       rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
-                                                       ENIC_AIC_LARGE_PKT_DIFF;
+                                                      tx_coalesce_usecs);
                }
-               break;
-       default:
-               break;
+               enic->tx_coalesce_usecs = tx_coalesce_usecs;
+       }
+       rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce;
+       if (!rxcoal->use_adaptive_rx_coalesce)
+               enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
+       if (ecmd->rx_coalesce_usecs_high) {
+               rxcoal->range_end = rx_coalesce_usecs_high;
+               rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
+               rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
+                                               ENIC_AIC_LARGE_PKT_DIFF;
        }
 
-       enic->tx_coalesce_usecs = tx_coalesce_usecs;
        enic->rx_coalesce_usecs = rx_coalesce_usecs;
 
        return 0;
index 918a8e42139b1f8ba9d95a4a9a1300c014324db8..8f646e4e968b329ab53dcb70af3c733e7788661f 100644 (file)
@@ -1149,6 +1149,64 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
        return 0;
 }
 
+static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
+{
+       unsigned int intr = enic_msix_rq_intr(enic, rq->index);
+       struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+       u32 timer = cq->tobe_rx_coal_timeval;
+
+       if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
+               vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
+               cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
+       }
+}
+
+static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
+{
+       struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+       struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+       struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
+       int index;
+       u32 timer;
+       u32 range_start;
+       u32 traffic;
+       u64 delta;
+       ktime_t now = ktime_get();
+
+       delta = ktime_us_delta(now, cq->prev_ts);
+       if (delta < ENIC_AIC_TS_BREAK)
+               return;
+       cq->prev_ts = now;
+
+       traffic = pkt_size_counter->large_pkt_bytes_cnt +
+                 pkt_size_counter->small_pkt_bytes_cnt;
+       /* The table takes Mbps
+        * traffic *= 8    => bits
+        * traffic *= (10^6 / delta)    => bps
+        * traffic /= 10^6     => Mbps
+        *
+        * Combining, traffic *= (8 / delta)
+        */
+
+       traffic <<= 3;
+       traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;
+
+       for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
+               if (traffic < mod_table[index].rx_rate)
+                       break;
+       range_start = (pkt_size_counter->small_pkt_bytes_cnt >
+                      pkt_size_counter->large_pkt_bytes_cnt << 1) ?
+                     rx_coal->small_pkt_range_start :
+                     rx_coal->large_pkt_range_start;
+       timer = range_start + ((rx_coal->range_end - range_start) *
+                              mod_table[index].range_percent / 100);
+       /* Damping */
+       cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
+
+       pkt_size_counter->large_pkt_bytes_cnt = 0;
+       pkt_size_counter->small_pkt_bytes_cnt = 0;
+}
+
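Plugging numbers into the conversion comment above (illustrative):

    /* 2,500,000 bytes seen over delta = 10,000 us:
     *   traffic <<= 3              -> 20,000,000 bits
     *   traffic / (u32)delta       -> 20,000,000 / 10,000 = 2,000 Mbps
     * which indexes mod_table[] to pick the coalescing range; the result
     * is then damped by averaging with the previous timer value.
     */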
 static int enic_poll(struct napi_struct *napi, int budget)
 {
        struct net_device *netdev = napi->dev;
@@ -1199,6 +1257,11 @@ static int enic_poll(struct napi_struct *napi, int budget)
 
        if (err)
                rq_work_done = rq_work_to_do;
+       if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+               /* Call the function which refreshes the intr coalescing timer
+                * value based on the traffic.
+                */
+               enic_calc_int_moderation(enic, &enic->rq[0]);
 
        if (rq_work_done < rq_work_to_do) {
 
@@ -1207,70 +1270,14 @@ static int enic_poll(struct napi_struct *napi, int budget)
                 */
 
                napi_complete(napi);
+               if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+                       enic_set_int_moderation(enic, &enic->rq[0]);
                vnic_intr_unmask(&enic->intr[intr]);
        }
 
        return rq_work_done;
 }
 
-static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
-{
-       unsigned int intr = enic_msix_rq_intr(enic, rq->index);
-       struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
-       u32 timer = cq->tobe_rx_coal_timeval;
-
-       if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
-               vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
-               cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
-       }
-}
-
-static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
-{
-       struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
-       struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
-       struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
-       int index;
-       u32 timer;
-       u32 range_start;
-       u32 traffic;
-       u64 delta;
-       ktime_t now = ktime_get();
-
-       delta = ktime_us_delta(now, cq->prev_ts);
-       if (delta < ENIC_AIC_TS_BREAK)
-               return;
-       cq->prev_ts = now;
-
-       traffic = pkt_size_counter->large_pkt_bytes_cnt +
-                 pkt_size_counter->small_pkt_bytes_cnt;
-       /* The table takes Mbps
-        * traffic *= 8    => bits
-        * traffic *= (10^6 / delta)    => bps
-        * traffic /= 10^6     => Mbps
-        *
-        * Combining, traffic *= (8 / delta)
-        */
-
-       traffic <<= 3;
-       traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;
-
-       for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
-               if (traffic < mod_table[index].rx_rate)
-                       break;
-       range_start = (pkt_size_counter->small_pkt_bytes_cnt >
-                      pkt_size_counter->large_pkt_bytes_cnt << 1) ?
-                     rx_coal->small_pkt_range_start :
-                     rx_coal->large_pkt_range_start;
-       timer = range_start + ((rx_coal->range_end - range_start) *
-                              mod_table[index].range_percent / 100);
-       /* Damping */
-       cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
-
-       pkt_size_counter->large_pkt_bytes_cnt = 0;
-       pkt_size_counter->small_pkt_bytes_cnt = 0;
-}
-
 #ifdef CONFIG_RFS_ACCEL
 static void enic_free_rx_cpu_rmap(struct enic *enic)
 {
@@ -1407,10 +1414,8 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
        if (err)
                work_done = work_to_do;
        if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
-               /* Call the function which refreshes
-                * the intr coalescing timer value based on
-                * the traffic.  This is supported only in
-                * the case of MSI-x mode
+               /* Call the function which refreshes the intr coalescing timer
+                * value based on the traffic.
                 */
                enic_calc_int_moderation(enic, &enic->rq[rq]);
 
@@ -1569,12 +1574,6 @@ static void enic_set_rx_coal_setting(struct enic *enic)
        int index = -1;
        struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
 
-       /* If intr mode is not MSIX, do not do adaptive coalescing */
-       if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) {
-               netdev_info(enic->netdev, "INTR mode is not MSIX, Not initializing adaptive coalescing");
-               return;
-       }
-
        /* 1. Read the link speed from fw
         * 2. Pick the default range for the speed
         * 3. Update it in enic->rx_coalesce_setting
index d1017509b08ac1e171a12a89770373a5057c5d64..f7b42483921c5847a883a286d28f7700e10b1d28 100644 (file)
@@ -604,19 +604,7 @@ static struct pci_driver pci_driver = {
        .probe          = ec_bhf_probe,
        .remove         = ec_bhf_remove,
 };
-
-static int __init ec_bhf_init(void)
-{
-       return pci_register_driver(&pci_driver);
-}
-
-static void __exit ec_bhf_exit(void)
-{
-       pci_unregister_driver(&pci_driver);
-}
-
-module_init(ec_bhf_init);
-module_exit(ec_bhf_exit);
+module_pci_driver(pci_driver);
 
 module_param(polling_frequency, long, S_IRUGO);
 MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
index 8d12b41b3b1990af468da5a38c4759fc005ba3d3..0a27805cbbbd0e14f2988ffec5f207909628d65b 100644 (file)
@@ -37,7 +37,7 @@
 #include "be_hw.h"
 #include "be_roce.h"
 
-#define DRV_VER                        "10.6.0.2"
+#define DRV_VER                        "10.6.0.3"
 #define DRV_NAME               "be2net"
 #define BE_NAME                        "Emulex BladeEngine2"
 #define BE3_NAME               "Emulex BladeEngine3"
 
 #define MAX_VFS                        30 /* Max VFs supported by BE3 FW */
 #define FW_VER_LEN             32
+#define        CNTL_SERIAL_NUM_WORDS   8  /* Controller serial number words */
+#define        CNTL_SERIAL_NUM_WORD_SZ (sizeof(u16)) /* Byte-sz of serial num word */
 
 #define        RSS_INDIR_TABLE_LEN     128
 #define RSS_HASH_KEY_LEN       40
@@ -228,6 +230,7 @@ struct be_mcc_obj {
 struct be_tx_stats {
        u64 tx_bytes;
        u64 tx_pkts;
+       u64 tx_vxlan_offload_pkts;
        u64 tx_reqs;
        u64 tx_compl;
        ulong tx_jiffies;
@@ -275,6 +278,7 @@ struct be_rx_page_info {
 struct be_rx_stats {
        u64 rx_bytes;
        u64 rx_pkts;
+       u64 rx_vxlan_offload_pkts;
        u32 rx_drops_no_skbs;   /* skb allocation errors */
        u32 rx_drops_no_frags;  /* HW has no fetched frags */
        u32 rx_post_fail;       /* page post alloc failures */
@@ -590,6 +594,7 @@ struct be_adapter {
        struct rss_info rss_info;
        /* Filters for packets that need to be sent to BMC */
        u32 bmc_filt_mask;
+       u16 serial_num[CNTL_SERIAL_NUM_WORDS];
 };
 
 #define be_physfn(adapter)             (!adapter->virtfn)
index 9eac3227d2cabc15c2d21a4baafafc3761372560..3be1fbdcdd0215cbd6589001b3a11c2091d309c2 100644 (file)
@@ -88,19 +88,21 @@ static inline void *embedded_payload(struct be_mcc_wrb *wrb)
        return wrb->payload.embedded_payload;
 }
 
-static void be_mcc_notify(struct be_adapter *adapter)
+static int be_mcc_notify(struct be_adapter *adapter)
 {
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        u32 val = 0;
 
        if (be_check_error(adapter, BE_ERROR_ANY))
-               return;
+               return -EIO;
 
        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
        val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
 
        wmb();
        iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
+
+       return 0;
 }
 
 /* To check if valid bit is set, check the entire word as we don't know
@@ -170,6 +172,12 @@ static void be_async_cmd_process(struct be_adapter *adapter,
                return;
        }
 
+       if (opcode == OPCODE_LOWLEVEL_SET_LOOPBACK_MODE &&
+           subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
+               complete(&adapter->et_cmd_compl);
+               return;
+       }
+
        if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
             opcode == OPCODE_COMMON_WRITE_OBJECT) &&
            subsystem == CMD_SUBSYSTEM_COMMON) {
@@ -541,7 +549,9 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
 
        resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
 
-       be_mcc_notify(adapter);
+       status = be_mcc_notify(adapter);
+       if (status)
+               goto out;
 
        status = be_mcc_wait_compl(adapter);
        if (status == -EIO)
@@ -1547,7 +1557,10 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
        else
                hdr->version = 2;
 
-       be_mcc_notify(adapter);
+       status = be_mcc_notify(adapter);
+       if (status)
+               goto err;
+
        adapter->stats_cmd_sent = true;
 
 err:
@@ -1583,7 +1596,10 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
        req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
        req->cmd_params.params.reset_stats = 0;
 
-       be_mcc_notify(adapter);
+       status = be_mcc_notify(adapter);
+       if (status)
+               goto err;
+
        adapter->stats_cmd_sent = true;
 
 err:
@@ -1687,8 +1703,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
                               OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
                               sizeof(*req), wrb, NULL);
 
-       be_mcc_notify(adapter);
-
+       status = be_mcc_notify(adapter);
 err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
@@ -1860,7 +1875,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
                                cpu_to_le32(set_eqd[i].delay_multiplier);
        }
 
-       be_mcc_notify(adapter);
+       status = be_mcc_notify(adapter);
 err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
@@ -1953,7 +1968,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
                        memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
        }
 
-       status = be_mcc_notify_wait(adapter);
+       status = be_mcc_notify(adapter);
 err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
@@ -2320,7 +2335,10 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
        req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
                                sizeof(struct lancer_cmd_req_write_object)));
 
-       be_mcc_notify(adapter);
+       status = be_mcc_notify(adapter);
+       if (status)
+               goto err_unlock;
+
        spin_unlock_bh(&adapter->mcc_lock);
 
        if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
@@ -2491,7 +2509,10 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
        req->params.op_code = cpu_to_le32(flash_opcode);
        req->params.data_buf_size = cpu_to_le32(buf_size);
 
-       be_mcc_notify(adapter);
+       status = be_mcc_notify(adapter);
+       if (status)
+               goto err_unlock;
+
        spin_unlock_bh(&adapter->mcc_lock);
 
        if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
@@ -2585,7 +2606,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
-               goto err;
+               goto err_unlock;
        }
 
        req = embedded_payload(wrb);
@@ -2599,8 +2620,19 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
        req->loopback_type = loopback_type;
        req->loopback_state = enable;
 
-       status = be_mcc_notify_wait(adapter);
-err:
+       status = be_mcc_notify(adapter);
+       if (status)
+               goto err_unlock;
+
+       spin_unlock_bh(&adapter->mcc_lock);
+
+       if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
+                                        msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
+               status = -ETIMEDOUT;
+
+       return status;
+
+err_unlock:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
 }
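The control flow after this change (a sketch of the intended sequence): be_cmd_set_loopback() posts the command and returns without the usual synchronous MCC wait; the async-event path added earlier in this series (OPCODE_LOWLEVEL_SET_LOOPBACK_MODE in be_async_cmd_process()) signals et_cmd_compl, and the caller gives up with -ETIMEDOUT after SET_LB_MODE_TIMEOUT.

    /* poster                                   completion source
     * be_mcc_notify()                    -->   MCC queue doorbell
     * wait_for_completion_timeout(
     *     &adapter->et_cmd_compl, 12s)   <--   be_async_cmd_process() on
     *                                          OPCODE_LOWLEVEL_SET_LOOPBACK_MODE
     */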
@@ -2636,7 +2668,9 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
        req->num_pkts = cpu_to_le32(num_pkts);
        req->loopback_type = cpu_to_le32(loopback_type);
 
-       be_mcc_notify(adapter);
+       status = be_mcc_notify(adapter);
+       if (status)
+               goto err;
 
        spin_unlock_bh(&adapter->mcc_lock);
 
@@ -2818,10 +2852,11 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_cntl_attribs *req;
        struct be_cmd_resp_cntl_attribs *resp;
-       int status;
+       int status, i;
        int payload_len = max(sizeof(*req), sizeof(*resp));
        struct mgmt_controller_attrib *attribs;
        struct be_dma_mem attribs_cmd;
+       u32 *serial_num;
 
        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;
@@ -2852,6 +2887,10 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
        if (!status) {
                attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
                adapter->hba_port_num = attribs->hba_attribs.phy_port;
+               serial_num = attribs->hba_attribs.controller_serial_number;
+               for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
+                       adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
+                               (BIT_MASK(16) - 1);
        }
 
 err:
index 2716e6f30d9a0949633b40dc9864196c7465fa3a..36d835bd5f3c06f86020e4c192c02cf074bf2340 100644 (file)
@@ -1495,6 +1495,8 @@ struct be_cmd_resp_acpi_wol_magic_config_v1 {
 #define BE_PME_D3COLD_CAP              0x80
 
 /********************** LoopBack test *********************/
+#define SET_LB_MODE_TIMEOUT            12000
+
 struct be_cmd_req_loopback_test {
        struct be_cmd_req_hdr hdr;
        u32 loopback_type;
@@ -1635,10 +1637,12 @@ struct be_cmd_req_set_qos {
 struct mgmt_hba_attribs {
        u32 rsvd0[24];
        u8 controller_model_number[32];
-       u32 rsvd1[79];
-       u8 rsvd2[3];
+       u32 rsvd1[16];
+       u32 controller_serial_number[8];
+       u32 rsvd2[55];
+       u8 rsvd3[3];
        u8 phy_port;
-       u32 rsvd3[13];
+       u32 rsvd4[13];
 } __packed;
 
 struct mgmt_controller_attrib {
@@ -1758,6 +1762,7 @@ struct be_cmd_req_set_mac_list {
 /*********************** HSW Config ***********************/
 #define PORT_FWD_TYPE_VEPA             0x3
 #define PORT_FWD_TYPE_VEB              0x2
+#define PORT_FWD_TYPE_PASSTHRU         0x1
 
 #define ENABLE_MAC_SPOOFCHK            0x2
 #define DISABLE_MAC_SPOOFCHK           0x3
index b2476dbfd103120affb5e216a31d304dda570a67..2c9ed1710ba6f4c16d8a3d800602b6d2337abb59 100644 (file)
@@ -138,6 +138,7 @@ static const struct be_ethtool_stat et_stats[] = {
 static const struct be_ethtool_stat et_rx_stats[] = {
        {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
        {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
+       {DRVSTAT_RX_INFO(rx_vxlan_offload_pkts)},
        {DRVSTAT_RX_INFO(rx_compl)},
        {DRVSTAT_RX_INFO(rx_compl_err)},
        {DRVSTAT_RX_INFO(rx_mcast_pkts)},
@@ -190,6 +191,7 @@ static const struct be_ethtool_stat et_tx_stats[] = {
        {DRVSTAT_TX_INFO(tx_internal_parity_err)},
        {DRVSTAT_TX_INFO(tx_bytes)},
        {DRVSTAT_TX_INFO(tx_pkts)},
+       {DRVSTAT_TX_INFO(tx_vxlan_offload_pkts)},
        /* Number of skbs queued for transmission by the driver */
        {DRVSTAT_TX_INFO(tx_reqs)},
        /* Number of times the TX queue was stopped due to lack
@@ -847,10 +849,21 @@ err:
 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
                            u64 *status)
 {
-       be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1);
+       int ret;
+
+       ret = be_cmd_set_loopback(adapter, adapter->hba_port_num,
+                                 loopback_type, 1);
+       if (ret)
+               return ret;
+
        *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
                                       loopback_type, 1500, 2, 0xabc);
-       be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1);
+
+       ret = be_cmd_set_loopback(adapter, adapter->hba_port_num,
+                                 BE_NO_LOOPBACK, 1);
+       if (ret)
+               return ret;
+
        return *status;
 }
 
index 6f642426308c67399eac3abdb20ae6160ce41d2a..d86bc5d5224627a812ba0a430c21f7a4f23513b3 100644 (file)
@@ -677,11 +677,14 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
 static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
 {
        struct be_tx_stats *stats = tx_stats(txo);
+       u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
 
        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_bytes += skb->len;
-       stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
+       stats->tx_pkts += tx_pkts;
+       if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
+               stats->tx_vxlan_offload_pkts += tx_pkts;
        u64_stats_update_end(&stats->sync);
 }
 
@@ -1254,7 +1257,7 @@ static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
        if (is_udp_pkt((*skb))) {
                struct udphdr *udp = udp_hdr((*skb));
 
-               switch (udp->dest) {
+               switch (ntohs(udp->dest)) {
                case DHCP_CLIENT_PORT:
                        os2bmc = is_dhcp_client_filt_enabled(adapter);
                        goto done;
@@ -1957,6 +1960,8 @@ static void be_rx_stats_update(struct be_rx_obj *rxo,
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
+       if (rxcp->tunneled)
+               stats->rx_vxlan_offload_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
@@ -3529,15 +3534,15 @@ err:
 
 static int be_setup_wol(struct be_adapter *adapter, bool enable)
 {
+       struct device *dev = &adapter->pdev->dev;
        struct be_dma_mem cmd;
-       int status = 0;
        u8 mac[ETH_ALEN];
+       int status;
 
        eth_zero_addr(mac);
 
        cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
-       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
-                                    GFP_KERNEL);
+       cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
        if (!cmd.va)
                return -ENOMEM;
 
@@ -3546,24 +3551,18 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
                                                PCICFG_PM_CONTROL_OFFSET,
                                                PCICFG_PM_CONTROL_MASK);
                if (status) {
-                       dev_err(&adapter->pdev->dev,
-                               "Could not enable Wake-on-lan\n");
-                       dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
-                                         cmd.dma);
-                       return status;
+                       dev_err(dev, "Could not enable Wake-on-lan\n");
+                       goto err;
                }
-               status = be_cmd_enable_magic_wol(adapter,
-                                                adapter->netdev->dev_addr,
-                                                &cmd);
-               pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
-               pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
        } else {
-               status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
-               pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
-               pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
+               ether_addr_copy(mac, adapter->netdev->dev_addr);
        }
 
-       dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+       status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
+       pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
+       pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
+err:
+       dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
        return status;
 }
 
@@ -4924,7 +4923,7 @@ static bool be_check_ufi_compatibility(struct be_adapter *adapter,
 {
        if (!fhdr) {
                dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
-               return -1;
+               return false;
        }
 
        /* First letter of the build version is used to identify
@@ -5079,9 +5078,6 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
        int status = 0;
        u8 hsw_mode;
 
-       if (!sriov_enabled(adapter))
-               return 0;
-
        /* BE and Lancer chips support VEB mode only */
        if (BEx_chip(adapter) || lancer_chip(adapter)) {
                hsw_mode = PORT_FWD_TYPE_VEB;
@@ -5091,6 +5087,9 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                                               NULL);
                if (status)
                        return 0;
+
+               if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
+                       return 0;
        }
 
        return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
@@ -5225,6 +5224,27 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
 }
 #endif
 
+static int be_get_phys_port_id(struct net_device *dev,
+                              struct netdev_phys_item_id *ppid)
+{
+       int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
+       struct be_adapter *adapter = netdev_priv(dev);
+       u8 *id;
+
+       if (MAX_PHYS_ITEM_ID_LEN < id_len)
+               return -ENOSPC;
+
+       ppid->id[0] = adapter->hba_port_num + 1;
+       id = &ppid->id[1];
+       for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
+            i--, id += CNTL_SERIAL_NUM_WORD_SZ)
+               memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
+
+       ppid->id_len = id_len;
+
+       return 0;
+}
+
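The resulting ID buffer (sketch): one byte of 1-based port number followed by the eight 16-bit serial-number words copied highest-index first, 17 bytes in all, which is why the MAX_PHYS_ITEM_ID_LEN check precedes the copy.

    /* id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1 = 17
     *   id[0]      = hba_port_num + 1
     *   id[1..2]   = serial_num[7]
     *   ...
     *   id[15..16] = serial_num[0]
     */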
 static const struct net_device_ops be_netdev_ops = {
        .ndo_open               = be_open,
        .ndo_stop               = be_close,
@@ -5255,6 +5275,7 @@ static const struct net_device_ops be_netdev_ops = {
        .ndo_del_vxlan_port     = be_del_vxlan_port,
        .ndo_features_check     = be_features_check,
 #endif
+       .ndo_get_phys_port_id   = be_get_phys_port_id,
 };
 
 static void be_netdev_init(struct net_device *netdev)
@@ -5813,7 +5834,6 @@ static int be_pci_resume(struct pci_dev *pdev)
        if (status)
                return status;
 
-       pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
 
        status = be_resume(adapter);
@@ -5893,7 +5913,6 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
                return PCI_ERS_RESULT_DISCONNECT;
 
        pci_set_master(pdev);
-       pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
 
        /* Check if card is ok and fw is ready */
index 1eee73cccdf58deba85c810399930ffa55dfa03c..99d33e2d35e6c2c219fbd06f34546619a01a586d 100644 (file)
@@ -562,6 +562,7 @@ struct fec_enet_private {
 };
 
 void fec_ptp_init(struct platform_device *pdev);
+void fec_ptp_stop(struct platform_device *pdev);
 void fec_ptp_start_cyclecounter(struct net_device *ndev);
 int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
 int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
index 42e20e5385acb553b528ee8a6b275791e9ad1b44..32e3807c650ea7256b09f0c9e406a8219cb2bb78 100644 (file)
@@ -3142,8 +3142,8 @@ static int fec_enet_init(struct net_device *ndev)
                        fep->bufdesc_size;
 
        /* Allocate memory for buffer descriptors. */
-       cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma,
-                                     GFP_KERNEL);
+       cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
+                                      GFP_KERNEL);
        if (!cbd_base) {
                return -ENOMEM;
        }
@@ -3431,6 +3431,11 @@ fec_probe(struct platform_device *pdev)
                fep->reg_phy = NULL;
        }
 
+       pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
+       pm_runtime_use_autosuspend(&pdev->dev);
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+
        fec_reset_phy(pdev);
 
        if (fep->bufdesc_ex)
@@ -3465,8 +3470,6 @@ fec_probe(struct platform_device *pdev)
        netif_carrier_off(ndev);
        fec_enet_clk_enable(ndev, false);
        pinctrl_pm_select_sleep_state(&pdev->dev);
-       pm_runtime_set_active(&pdev->dev);
-       pm_runtime_enable(&pdev->dev);
 
        ret = register_netdev(ndev);
        if (ret)
@@ -3481,8 +3484,6 @@ fec_probe(struct platform_device *pdev)
        fep->rx_copybreak = COPYBREAK_DEFAULT;
        INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
 
-       pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
-       pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_mark_last_busy(&pdev->dev);
        pm_runtime_put_autosuspend(&pdev->dev);
 
@@ -3493,6 +3494,7 @@ failed_register:
 failed_mii_init:
 failed_irq:
 failed_init:
+       fec_ptp_stop(pdev);
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
 failed_regulator:
@@ -3514,14 +3516,12 @@ fec_drv_remove(struct platform_device *pdev)
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct fec_enet_private *fep = netdev_priv(ndev);
 
-       cancel_delayed_work_sync(&fep->time_keep);
        cancel_work_sync(&fep->tx_timeout_work);
+       fec_ptp_stop(pdev);
        unregister_netdev(ndev);
        fec_enet_mii_remove(fep);
        if (fep->reg_phy)
                regulator_disable(fep->reg_phy);
-       if (fep->ptp_clock)
-               ptp_clock_unregister(fep->ptp_clock);
        of_node_put(fep->phy_node);
        free_netdev(ndev);
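
Two details in the fec_main.c hunks above deserve a note. First, the descriptor ring is now allocated with dmam_alloc_coherent() against the actual device rather than dma_alloc_coherent(NULL, ...); the managed (devres) variant is released automatically on probe failure and driver unbind, which is why no matching free appears in the remove path. Second, the pm_runtime setup moves ahead of fec_reset_phy() so the runtime-PM state is already consistent when the MDIO bus and PHY come up. A minimal sketch of the managed allocation, assuming a platform driver context:

        #include <linux/dma-mapping.h>
        #include <linux/platform_device.h>

        /* Sketch: devres-managed coherent memory, freed automatically when
         * the device is unbound, including on probe-error unwind.
         */
        static int sketch_probe(struct platform_device *pdev)
        {
                dma_addr_t bd_dma;
                void *cbd_base;
                size_t bd_size = PAGE_SIZE;     /* assumed ring size */

                cbd_base = dmam_alloc_coherent(&pdev->dev, bd_size, &bd_dma,
                                               GFP_KERNEL);
                if (!cbd_base)
                        return -ENOMEM;         /* nothing to unwind */

                return 0;
        }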
 
index a15663ad7f5e98c64a1f6e0912163bee578c938b..1543cf0e8ef647c92f1748e474833cf55300a490 100644 (file)
@@ -506,12 +506,6 @@ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
                break;
 
        default:
-               /*
-                * register RXMTRL must be set in order to do V1 packets,
-                * therefore it is not possible to time stamp both V1 Sync and
-                * Delay_Req messages and hardware does not support
-                * timestamping all packets => return error
-                */
                fep->hwts_rx_en = 1;
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
@@ -604,6 +598,16 @@ void fec_ptp_init(struct platform_device *pdev)
        schedule_delayed_work(&fep->time_keep, HZ);
 }
 
+void fec_ptp_stop(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct fec_enet_private *fep = netdev_priv(ndev);
+
+       cancel_delayed_work_sync(&fep->time_keep);
+       if (fep->ptp_clock)
+               ptp_clock_unregister(fep->ptp_clock);
+}
+
 /**
  * fec_ptp_check_pps_event
  * @fep: the fec_enet_private structure handle
index ff875028fdff5e1723c618f721658971cd051603..087ffcdc48a312d365ffb24ee4f7c16ddcf18edb 100644 (file)
 
 #define TX_TIMEOUT      (1*HZ)
 
-const char gfar_driver_version[] = "1.3";
+const char gfar_driver_version[] = "2.0";
 
 static int gfar_enet_open(struct net_device *dev);
 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static void gfar_reset_task(struct work_struct *work);
 static void gfar_timeout(struct net_device *dev);
 static int gfar_close(struct net_device *dev);
-static struct sk_buff *gfar_new_skb(struct net_device *dev,
-                                   dma_addr_t *bufaddr);
+static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
+                               int alloc_cnt);
 static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
 static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -141,8 +141,7 @@ static void gfar_netpoll(struct net_device *dev);
 #endif
 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
-static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-                              int amount_pull, struct napi_struct *napi);
+static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
 static void gfar_halt_nodisable(struct gfar_private *priv);
 static void gfar_clear_exact_match(struct net_device *dev);
 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
@@ -169,17 +168,15 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
        bdp->lstatus = cpu_to_be32(lstatus);
 }
 
-static int gfar_init_bds(struct net_device *ndev)
+static void gfar_init_bds(struct net_device *ndev)
 {
        struct gfar_private *priv = netdev_priv(ndev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;
        struct txbd8 *txbdp;
-       struct rxbd8 *rxbdp;
        u32 __iomem *rfbptr;
        int i, j;
-       dma_addr_t bufaddr;
 
        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
@@ -207,40 +204,26 @@ static int gfar_init_bds(struct net_device *ndev)
        rfbptr = &regs->rfbptr0;
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
-               rx_queue->cur_rx = rx_queue->rx_bd_base;
-               rx_queue->skb_currx = 0;
-               rxbdp = rx_queue->rx_bd_base;
 
-               for (j = 0; j < rx_queue->rx_ring_size; j++) {
-                       struct sk_buff *skb = rx_queue->rx_skbuff[j];
+               rx_queue->next_to_clean = 0;
+               rx_queue->next_to_use = 0;
+               rx_queue->next_to_alloc = 0;
 
-                       if (skb) {
-                               bufaddr = be32_to_cpu(rxbdp->bufPtr);
-                       } else {
-                               skb = gfar_new_skb(ndev, &bufaddr);
-                               if (!skb) {
-                                       netdev_err(ndev, "Can't allocate RX buffers\n");
-                                       return -ENOMEM;
-                               }
-                               rx_queue->rx_skbuff[j] = skb;
-                       }
-
-                       gfar_init_rxbdp(rx_queue, rxbdp, bufaddr);
-                       rxbdp++;
-               }
+               /* make sure next_to_clean != next_to_use after this
+                * by leaving at least 1 unused descriptor
+                */
+               gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
 
                rx_queue->rfbptr = rfbptr;
                rfbptr += 2;
        }
-
-       return 0;
 }
 
 static int gfar_alloc_skb_resources(struct net_device *ndev)
 {
        void *vaddr;
        dma_addr_t addr;
-       int i, j, k;
+       int i, j;
        struct gfar_private *priv = netdev_priv(ndev);
        struct device *dev = priv->dev;
        struct gfar_priv_tx_q *tx_queue = NULL;
@@ -279,7 +262,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
                rx_queue = priv->rx_queue[i];
                rx_queue->rx_bd_base = vaddr;
                rx_queue->rx_bd_dma_base = addr;
-               rx_queue->dev = ndev;
+               rx_queue->ndev = ndev;
+               rx_queue->dev = dev;
                addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
                vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
        }
@@ -294,25 +278,20 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
                if (!tx_queue->tx_skbuff)
                        goto cleanup;
 
-               for (k = 0; k < tx_queue->tx_ring_size; k++)
-                       tx_queue->tx_skbuff[k] = NULL;
+               for (j = 0; j < tx_queue->tx_ring_size; j++)
+                       tx_queue->tx_skbuff[j] = NULL;
        }
 
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
-               rx_queue->rx_skbuff =
-                       kmalloc_array(rx_queue->rx_ring_size,
-                                     sizeof(*rx_queue->rx_skbuff),
-                                     GFP_KERNEL);
-               if (!rx_queue->rx_skbuff)
+               rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
+                                           sizeof(*rx_queue->rx_buff),
+                                           GFP_KERNEL);
+               if (!rx_queue->rx_buff)
                        goto cleanup;
-
-               for (j = 0; j < rx_queue->rx_ring_size; j++)
-                       rx_queue->rx_skbuff[j] = NULL;
        }
 
-       if (gfar_init_bds(ndev))
-               goto cleanup;
+       gfar_init_bds(ndev);
 
        return 0;
 
@@ -354,10 +333,8 @@ static void gfar_init_rqprm(struct gfar_private *priv)
        }
 }
 
-static void gfar_rx_buff_size_config(struct gfar_private *priv)
+static void gfar_rx_offload_en(struct gfar_private *priv)
 {
-       int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;
-
        /* set this when rx hw offload (TOE) functions are being used */
        priv->uses_rxfcb = 0;
 
@@ -366,16 +343,6 @@ static void gfar_rx_buff_size_config(struct gfar_private *priv)
 
        if (priv->hwts_rx_en)
                priv->uses_rxfcb = 1;
-
-       if (priv->uses_rxfcb)
-               frame_size += GMAC_FCB_LEN;
-
-       frame_size += priv->padding;
-
-       frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
-                    INCREMENTAL_BUFFER_SIZE;
-
-       priv->rx_buffer_size = frame_size;
 }
 
 static void gfar_mac_rx_config(struct gfar_private *priv)
@@ -565,22 +532,6 @@ static void gfar_ints_enable(struct gfar_private *priv)
        }
 }
 
-static void lock_tx_qs(struct gfar_private *priv)
-{
-       int i;
-
-       for (i = 0; i < priv->num_tx_queues; i++)
-               spin_lock(&priv->tx_queue[i]->txlock);
-}
-
-static void unlock_tx_qs(struct gfar_private *priv)
-{
-       int i;
-
-       for (i = 0; i < priv->num_tx_queues; i++)
-               spin_unlock(&priv->tx_queue[i]->txlock);
-}
-
 static int gfar_alloc_tx_queues(struct gfar_private *priv)
 {
        int i;
@@ -609,9 +560,8 @@ static int gfar_alloc_rx_queues(struct gfar_private *priv)
                if (!priv->rx_queue[i])
                        return -ENOMEM;
 
-               priv->rx_queue[i]->rx_skbuff = NULL;
                priv->rx_queue[i]->qindex = i;
-               priv->rx_queue[i]->dev = priv->ndev;
+               priv->rx_queue[i]->ndev = priv->ndev;
        }
        return 0;
 }
@@ -1203,12 +1153,11 @@ void gfar_mac_reset(struct gfar_private *priv)
 
        udelay(3);
 
-       /* Compute rx_buff_size based on config flags */
-       gfar_rx_buff_size_config(priv);
+       gfar_rx_offload_en(priv);
 
        /* Initialize the max receive frame/buffer lengths */
-       gfar_write(&regs->maxfrm, priv->rx_buffer_size);
-       gfar_write(&regs->mrblr, priv->rx_buffer_size);
+       gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
+       gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
 
        /* Initialize the Minimum Frame Length Register */
        gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
@@ -1216,12 +1165,11 @@ void gfar_mac_reset(struct gfar_private *priv)
        /* Initialize MACCFG2. */
        tempval = MACCFG2_INIT_SETTINGS;
 
-       /* If the mtu is larger than the max size for standard
-        * ethernet frames (ie, a jumbo frame), then set maccfg2
-        * to allow huge frames, and to check the length
+       /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
+        * are marked as truncated.  Avoid this by setting MACCFG2[Huge Frame]=1,
+        * checking RxBD[LG], and discarding frames larger than MAXFRM.
         */
-       if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
-           gfar_has_errata(priv, GFAR_ERRATA_74))
+       if (gfar_has_errata(priv, GFAR_ERRATA_74))
                tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
 
        gfar_write(&regs->maccfg2, tempval);
@@ -1376,7 +1324,6 @@ static int gfar_probe(struct platform_device *ofdev)
        priv->dev = &ofdev->dev;
        SET_NETDEV_DEV(dev, &ofdev->dev);
 
-       spin_lock_init(&priv->bflock);
        INIT_WORK(&priv->reset_task, gfar_reset_task);
 
        platform_set_drvdata(ofdev, priv);
@@ -1432,8 +1379,6 @@ static int gfar_probe(struct platform_device *ofdev)
            priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
                dev->needed_headroom = GMAC_FCB_LEN;
 
-       priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
-
        /* Initializing some of the rx/tx queue level parameters */
        for (i = 0; i < priv->num_tx_queues; i++) {
                priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
@@ -1470,9 +1415,8 @@ static int gfar_probe(struct platform_device *ofdev)
                goto register_fail;
        }
 
-       device_init_wakeup(&dev->dev,
-                          priv->device_flags &
-                          FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+       device_set_wakeup_capable(&dev->dev, priv->device_flags &
+                                 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
 
        /* fill out IRQ number and name fields */
        for (i = 0; i < priv->num_grps; i++) {
@@ -1540,48 +1484,37 @@ static int gfar_suspend(struct device *dev)
        struct gfar_private *priv = dev_get_drvdata(dev);
        struct net_device *ndev = priv->ndev;
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
-       unsigned long flags;
        u32 tempval;
-
        int magic_packet = priv->wol_en &&
                           (priv->device_flags &
                            FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
 
+       if (!netif_running(ndev))
+               return 0;
+
+       disable_napi(priv);
+       netif_tx_lock(ndev);
        netif_device_detach(ndev);
+       netif_tx_unlock(ndev);
 
-       if (netif_running(ndev)) {
+       gfar_halt(priv);
 
-               local_irq_save(flags);
-               lock_tx_qs(priv);
+       if (magic_packet) {
+               /* Enable interrupt on Magic Packet */
+               gfar_write(&regs->imask, IMASK_MAG);
 
-               gfar_halt_nodisable(priv);
+               /* Enable Magic Packet mode */
+               tempval = gfar_read(&regs->maccfg2);
+               tempval |= MACCFG2_MPEN;
+               gfar_write(&regs->maccfg2, tempval);
 
-               /* Disable Tx, and Rx if wake-on-LAN is disabled. */
+               /* re-enable the Rx block */
                tempval = gfar_read(&regs->maccfg1);
-
-               tempval &= ~MACCFG1_TX_EN;
-
-               if (!magic_packet)
-                       tempval &= ~MACCFG1_RX_EN;
-
+               tempval |= MACCFG1_RX_EN;
                gfar_write(&regs->maccfg1, tempval);
 
-               unlock_tx_qs(priv);
-               local_irq_restore(flags);
-
-               disable_napi(priv);
-
-               if (magic_packet) {
-                       /* Enable interrupt on Magic Packet */
-                       gfar_write(&regs->imask, IMASK_MAG);
-
-                       /* Enable Magic Packet mode */
-                       tempval = gfar_read(&regs->maccfg2);
-                       tempval |= MACCFG2_MPEN;
-                       gfar_write(&regs->maccfg2, tempval);
-               } else {
-                       phy_stop(priv->phydev);
-               }
+       } else {
+               phy_stop(priv->phydev);
        }
 
        return 0;
@@ -1592,37 +1525,26 @@ static int gfar_resume(struct device *dev)
        struct gfar_private *priv = dev_get_drvdata(dev);
        struct net_device *ndev = priv->ndev;
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
-       unsigned long flags;
        u32 tempval;
        int magic_packet = priv->wol_en &&
                           (priv->device_flags &
                            FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
 
-       if (!netif_running(ndev)) {
-               netif_device_attach(ndev);
+       if (!netif_running(ndev))
                return 0;
-       }
 
-       if (!magic_packet && priv->phydev)
+       if (magic_packet) {
+               /* Disable Magic Packet mode */
+               tempval = gfar_read(&regs->maccfg2);
+               tempval &= ~MACCFG2_MPEN;
+               gfar_write(&regs->maccfg2, tempval);
+       } else {
                phy_start(priv->phydev);
-
-       /* Disable Magic Packet mode, in case something
-        * else woke us up.
-        */
-       local_irq_save(flags);
-       lock_tx_qs(priv);
-
-       tempval = gfar_read(&regs->maccfg2);
-       tempval &= ~MACCFG2_MPEN;
-       gfar_write(&regs->maccfg2, tempval);
+       }
 
        gfar_start(priv);
 
-       unlock_tx_qs(priv);
-       local_irq_restore(flags);
-
        netif_device_attach(ndev);
-
        enable_napi(priv);
 
        return 0;
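
Read together with the IRQF_NO_SUSPEND changes further down, the suspend path above intentionally leaves the Rx block running and the magic-packet interrupt (IMASK_MAG) armed when wake-on-LAN is enabled, so the wake event can still be delivered while the system sleeps; resume then only needs to clear MACCFG2[MPEN] and restart the queues.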
@@ -1639,10 +1561,7 @@ static int gfar_restore(struct device *dev)
                return 0;
        }
 
-       if (gfar_init_bds(ndev)) {
-               free_skb_resources(priv);
-               return -ENOMEM;
-       }
+       gfar_init_bds(ndev);
 
        gfar_mac_reset(priv);
 
@@ -1933,26 +1852,32 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
 
 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
 {
-       struct rxbd8 *rxbdp;
-       struct gfar_private *priv = netdev_priv(rx_queue->dev);
        int i;
 
-       rxbdp = rx_queue->rx_bd_base;
+       struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
+
+       if (rx_queue->skb)
+               dev_kfree_skb(rx_queue->skb);
 
        for (i = 0; i < rx_queue->rx_ring_size; i++) {
-               if (rx_queue->rx_skbuff[i]) {
-                       dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr),
-                                        priv->rx_buffer_size,
-                                        DMA_FROM_DEVICE);
-                       dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
-                       rx_queue->rx_skbuff[i] = NULL;
-               }
+               struct  gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
+
                rxbdp->lstatus = 0;
                rxbdp->bufPtr = 0;
                rxbdp++;
+
+               if (!rxb->page)
+                       continue;
+
+               dma_unmap_single(rx_queue->dev, rxb->dma,
+                                PAGE_SIZE, DMA_FROM_DEVICE);
+               __free_page(rxb->page);
+
+               rxb->page = NULL;
        }
-       kfree(rx_queue->rx_skbuff);
-       rx_queue->rx_skbuff = NULL;
+
+       kfree(rx_queue->rx_buff);
+       rx_queue->rx_buff = NULL;
 }
 
 /* If there are any tx skbs or rx skbs still around, free them.
@@ -1977,7 +1902,7 @@ static void free_skb_resources(struct gfar_private *priv)
 
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
-               if (rx_queue->rx_skbuff)
+               if (rx_queue->rx_buff)
                        free_skb_rx_queue(rx_queue);
        }
 
@@ -2045,7 +1970,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
                /* Install our interrupt handlers for Error,
                 * Transmit, and Receive
                 */
-               err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
+               err = request_irq(gfar_irq(grp, ER)->irq, gfar_error,
+                                 IRQF_NO_SUSPEND,
                                  gfar_irq(grp, ER)->name, grp);
                if (err < 0) {
                        netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -2068,7 +1994,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
                        goto rx_irq_fail;
                }
        } else {
-               err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
+               err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt,
+                                 IRQF_NO_SUSPEND,
                                  gfar_irq(grp, TX)->name, grp);
                if (err < 0) {
                        netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -2169,8 +2096,6 @@ static int gfar_enet_open(struct net_device *dev)
        if (err)
                return err;
 
-       device_set_wakeup_enable(&dev->dev, priv->wol_en);
-
        return err;
 }
 
@@ -2535,7 +2460,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
        struct gfar_private *priv = netdev_priv(dev);
        int frame_size = new_mtu + ETH_HLEN;
 
-       if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
+       if ((frame_size < 64) || (frame_size > GFAR_JUMBO_FRAME_SIZE)) {
                netif_err(priv, drv, dev, "Invalid MTU setting\n");
                return -EINVAL;
        }
@@ -2589,15 +2514,6 @@ static void gfar_timeout(struct net_device *dev)
        schedule_work(&priv->reset_task);
 }
 
-static void gfar_align_skb(struct sk_buff *skb)
-{
-       /* We need the data buffer to be aligned properly.  We will reserve
-        * as many bytes as needed to align the data properly
-        */
-       skb_reserve(skb, RXBUF_ALIGNMENT -
-                   (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
-}
-
 /* Interrupt Handler for Transmit complete */
 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 {
@@ -2655,7 +2571,8 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 
                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
                        struct skb_shared_hwtstamps shhwtstamps;
-                       u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+                       u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
+                                         ~0x7UL);
 
                        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
                        shhwtstamps.hwtstamp = ns_to_ktime(*ns);
@@ -2704,49 +2621,85 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
        netdev_tx_completed_queue(txq, howmany, bytes_sent);
 }
 
-static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
+static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
 {
-       struct gfar_private *priv = netdev_priv(dev);
-       struct sk_buff *skb;
+       struct page *page;
+       dma_addr_t addr;
 
-       skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
-       if (!skb)
-               return NULL;
+       page = dev_alloc_page();
+       if (unlikely(!page))
+               return false;
 
-       gfar_align_skb(skb);
+       addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(rxq->dev, addr))) {
+               __free_page(page);
 
-       return skb;
+               return false;
+       }
+
+       rxb->dma = addr;
+       rxb->page = page;
+       rxb->page_offset = 0;
+
+       return true;
 }
 
-static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
+static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
 {
-       struct gfar_private *priv = netdev_priv(dev);
-       struct sk_buff *skb;
-       dma_addr_t addr;
+       struct gfar_private *priv = netdev_priv(rx_queue->ndev);
+       struct gfar_extra_stats *estats = &priv->extra_stats;
 
-       skb = gfar_alloc_skb(dev);
-       if (!skb)
-               return NULL;
+       netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
+       atomic64_inc(&estats->rx_alloc_err);
+}
 
-       addr = dma_map_single(priv->dev, skb->data,
-                             priv->rx_buffer_size, DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(priv->dev, addr))) {
-               dev_kfree_skb_any(skb);
-               return NULL;
+static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
+                               int alloc_cnt)
+{
+       struct rxbd8 *bdp;
+       struct gfar_rx_buff *rxb;
+       int i;
+
+       i = rx_queue->next_to_use;
+       bdp = &rx_queue->rx_bd_base[i];
+       rxb = &rx_queue->rx_buff[i];
+
+       while (alloc_cnt--) {
+               /* try reuse page */
+               if (unlikely(!rxb->page)) {
+                       if (unlikely(!gfar_new_page(rx_queue, rxb))) {
+                               gfar_rx_alloc_err(rx_queue);
+                               break;
+                       }
+               }
+
+               /* Setup the new RxBD */
+               gfar_init_rxbdp(rx_queue, bdp,
+                               rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
+
+               /* Update to the next pointer */
+               bdp++;
+               rxb++;
+
+               if (unlikely(++i == rx_queue->rx_ring_size)) {
+                       i = 0;
+                       bdp = rx_queue->rx_bd_base;
+                       rxb = rx_queue->rx_buff;
+               }
        }
 
-       *bufaddr = addr;
-       return skb;
+       rx_queue->next_to_use = i;
+       rx_queue->next_to_alloc = i;
 }
 
-static inline void count_errors(unsigned short status, struct net_device *dev)
+static void count_errors(u32 lstatus, struct net_device *ndev)
 {
-       struct gfar_private *priv = netdev_priv(dev);
-       struct net_device_stats *stats = &dev->stats;
+       struct gfar_private *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
        struct gfar_extra_stats *estats = &priv->extra_stats;
 
        /* If the packet was truncated, none of the other errors matter */
-       if (status & RXBD_TRUNCATED) {
+       if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
                stats->rx_length_errors++;
 
                atomic64_inc(&estats->rx_trunc);
@@ -2754,25 +2707,25 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
                return;
        }
        /* Count the errors, if there were any */
-       if (status & (RXBD_LARGE | RXBD_SHORT)) {
+       if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
                stats->rx_length_errors++;
 
-               if (status & RXBD_LARGE)
+               if (lstatus & BD_LFLAG(RXBD_LARGE))
                        atomic64_inc(&estats->rx_large);
                else
                        atomic64_inc(&estats->rx_short);
        }
-       if (status & RXBD_NONOCTET) {
+       if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
                stats->rx_frame_errors++;
                atomic64_inc(&estats->rx_nonoctet);
        }
-       if (status & RXBD_CRCERR) {
+       if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
                atomic64_inc(&estats->rx_crcerr);
                stats->rx_crc_errors++;
        }
-       if (status & RXBD_OVERRUN) {
+       if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
                atomic64_inc(&estats->rx_overrun);
-               stats->rx_crc_errors++;
+               stats->rx_over_errors++;
        }
 }
 
@@ -2823,6 +2776,93 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
        return IRQ_HANDLED;
 }
 
+static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
+                            struct sk_buff *skb, bool first)
+{
+       unsigned int size = lstatus & BD_LENGTH_MASK;
+       struct page *page = rxb->page;
+
+       /* Remove the FCS from the packet length */
+       if (likely(lstatus & BD_LFLAG(RXBD_LAST)))
+               size -= ETH_FCS_LEN;
+
+       if (likely(first))
+               skb_put(skb, size);
+       else
+               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+                               rxb->page_offset + RXBUF_ALIGNMENT,
+                               size, GFAR_RXB_TRUESIZE);
+
+       /* try reuse page */
+       if (unlikely(page_count(page) != 1))
+               return false;
+
+       /* change offset to the other half */
+       rxb->page_offset ^= GFAR_RXB_TRUESIZE;
+
+       atomic_inc(&page->_count);
+
+       return true;
+}
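
A worked example of the half-page recycling above, assuming PAGE_SIZE = 4096 and GFAR_RXB_TRUESIZE = 2048: each page is split into two 2 KiB halves, and page_offset ^= 2048 flips between them (0 -> 2048 -> 0) each time a half is handed to the stack, while the extra page reference keeps the page alive until the stack drops its half. The page_count(page) != 1 test refuses to recycle a page the stack still holds; such a page is unmapped in gfar_get_next_rxbuff() and later replaced by a fresh one in gfar_alloc_rx_buffs().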
+
+static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
+                              struct gfar_rx_buff *old_rxb)
+{
+       struct gfar_rx_buff *new_rxb;
+       u16 nta = rxq->next_to_alloc;
+
+       new_rxb = &rxq->rx_buff[nta];
+
+       /* find next buf that can reuse a page */
+       nta++;
+       rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
+
+       /* copy page reference */
+       *new_rxb = *old_rxb;
+
+       /* sync for use by the device */
+       dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
+                                        old_rxb->page_offset,
+                                        GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
+}
+
+static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
+                                           u32 lstatus, struct sk_buff *skb)
+{
+       struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
+       struct page *page = rxb->page;
+       bool first = false;
+
+       if (likely(!skb)) {
+               void *buff_addr = page_address(page) + rxb->page_offset;
+
+               skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
+               if (unlikely(!skb)) {
+                       gfar_rx_alloc_err(rx_queue);
+                       return NULL;
+               }
+               skb_reserve(skb, RXBUF_ALIGNMENT);
+               first = true;
+       }
+
+       dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
+                                     GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
+
+       if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
+               /* reuse the free half of the page */
+               gfar_reuse_rx_page(rx_queue, rxb);
+       } else {
+               /* page cannot be reused, unmap it */
+               dma_unmap_page(rx_queue->dev, rxb->dma,
+                              PAGE_SIZE, DMA_FROM_DEVICE);
+       }
+
+       /* clear rxb content */
+       rxb->page = NULL;
+
+       return skb;
+}
+
 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
 {
        /* If valid headers were found, and valid sums
@@ -2837,10 +2877,9 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
 }
 
 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
-static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-                              int amount_pull, struct napi_struct *napi)
+static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
 {
-       struct gfar_private *priv = netdev_priv(dev);
+       struct gfar_private *priv = netdev_priv(ndev);
        struct rxfcb *fcb = NULL;
 
        /* fcb is at the beginning if exists */
@@ -2849,10 +2888,8 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
        /* Remove the FCB from the skb
         * Remove the padded bytes, if there are any
         */
-       if (amount_pull) {
-               skb_record_rx_queue(skb, fcb->rq);
-               skb_pull(skb, amount_pull);
-       }
+       if (priv->uses_rxfcb)
+               skb_pull(skb, GMAC_FCB_LEN);
 
        /* Get receive timestamp from the skb */
        if (priv->hwts_rx_en) {
@@ -2866,24 +2903,20 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
        if (priv->padding)
                skb_pull(skb, priv->padding);
 
-       if (dev->features & NETIF_F_RXCSUM)
+       if (ndev->features & NETIF_F_RXCSUM)
                gfar_rx_checksum(skb, fcb);
 
        /* Tell the skb what kind of packet this is */
-       skb->protocol = eth_type_trans(skb, dev);
+       skb->protocol = eth_type_trans(skb, ndev);
 
        /* There's a need to check for NETIF_F_HW_VLAN_CTAG_RX here.
         * Even if vlan rx accel is disabled, on some chips
         * RXFCB_VLN is pseudo randomly set.
         */
-       if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
+       if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
            be16_to_cpu(fcb->flags) & RXFCB_VLN)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       be16_to_cpu(fcb->vlctl));
-
-       /* Send the packet up the stack */
-       napi_gro_receive(napi, skb);
-
 }
 
 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
@@ -2892,91 +2925,89 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
  */
 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 {
-       struct net_device *dev = rx_queue->dev;
-       struct rxbd8 *bdp, *base;
-       struct sk_buff *skb;
-       int pkt_len;
-       int amount_pull;
-       int howmany = 0;
-       struct gfar_private *priv = netdev_priv(dev);
+       struct net_device *ndev = rx_queue->ndev;
+       struct gfar_private *priv = netdev_priv(ndev);
+       struct rxbd8 *bdp;
+       int i, howmany = 0;
+       struct sk_buff *skb = rx_queue->skb;
+       int cleaned_cnt = gfar_rxbd_unused(rx_queue);
+       unsigned int total_bytes = 0, total_pkts = 0;
 
        /* Get the first full descriptor */
-       bdp = rx_queue->cur_rx;
-       base = rx_queue->rx_bd_base;
+       i = rx_queue->next_to_clean;
 
-       amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
+       while (rx_work_limit--) {
+               u32 lstatus;
 
-       while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
-               struct sk_buff *newskb;
-               dma_addr_t bufaddr;
+               if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
+                       gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
+                       cleaned_cnt = 0;
+               }
 
+               bdp = &rx_queue->rx_bd_base[i];
+               lstatus = be32_to_cpu(bdp->lstatus);
+               if (lstatus & BD_LFLAG(RXBD_EMPTY))
+                       break;
+
+               /* order rx buffer descriptor reads */
                rmb();
 
-               /* Add another skb for the future */
-               newskb = gfar_new_skb(dev, &bufaddr);
+               /* fetch next to clean buffer from the ring */
+               skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
+               if (unlikely(!skb))
+                       break;
 
-               skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
+               cleaned_cnt++;
+               howmany++;
 
-               dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
-                                priv->rx_buffer_size, DMA_FROM_DEVICE);
-
-               if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
-                            be16_to_cpu(bdp->length) > priv->rx_buffer_size))
-                       bdp->status = cpu_to_be16(RXBD_LARGE);
-
-               /* We drop the frame if we failed to allocate a new buffer */
-               if (unlikely(!newskb ||
-                            !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
-                            be16_to_cpu(bdp->status) & RXBD_ERR)) {
-                       count_errors(be16_to_cpu(bdp->status), dev);
-
-                       if (unlikely(!newskb)) {
-                               newskb = skb;
-                               bufaddr = be32_to_cpu(bdp->bufPtr);
-                       } else if (skb)
-                               dev_kfree_skb(skb);
-               } else {
-                       /* Increment the number of packets */
-                       rx_queue->stats.rx_packets++;
-                       howmany++;
-
-                       if (likely(skb)) {
-                               pkt_len = be16_to_cpu(bdp->length) -
-                                         ETH_FCS_LEN;
-                               /* Remove the FCS from the packet length */
-                               skb_put(skb, pkt_len);
-                               rx_queue->stats.rx_bytes += pkt_len;
-                               skb_record_rx_queue(skb, rx_queue->qindex);
-                               gfar_process_frame(dev, skb, amount_pull,
-                                                  &rx_queue->grp->napi_rx);
+               if (unlikely(++i == rx_queue->rx_ring_size))
+                       i = 0;
 
-                       } else {
-                               netif_warn(priv, rx_err, dev, "Missing skb!\n");
-                               rx_queue->stats.rx_dropped++;
-                               atomic64_inc(&priv->extra_stats.rx_skbmissing);
-                       }
+               rx_queue->next_to_clean = i;
+
+               /* fetch next buffer if not the last in frame */
+               if (!(lstatus & BD_LFLAG(RXBD_LAST)))
+                       continue;
 
+               if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
+                       count_errors(lstatus, ndev);
+
+                       /* discard faulty buffer */
+                       dev_kfree_skb(skb);
+                       skb = NULL;
+                       rx_queue->stats.rx_dropped++;
+                       continue;
                }
 
-               rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
+               /* Increment the number of packets */
+               total_pkts++;
+               total_bytes += skb->len;
 
-               /* Setup the new bdp */
-               gfar_init_rxbdp(rx_queue, bdp, bufaddr);
+               skb_record_rx_queue(skb, rx_queue->qindex);
 
-               /* Update Last Free RxBD pointer for LFC */
-               if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
-                       gfar_write(rx_queue->rfbptr, (u32)bdp);
+               gfar_process_frame(ndev, skb);
 
-               /* Update to the next pointer */
-               bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
+               /* Send the packet up the stack */
+               napi_gro_receive(&rx_queue->grp->napi_rx, skb);
 
-               /* update to point at the next skb */
-               rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
-                                     RX_RING_MOD_MASK(rx_queue->rx_ring_size);
+               skb = NULL;
        }
 
-       /* Update the current rxbd pointer to be the next one */
-       rx_queue->cur_rx = bdp;
+       /* Store incomplete frames for completion */
+       rx_queue->skb = skb;
+
+       rx_queue->stats.rx_packets += total_pkts;
+       rx_queue->stats.rx_bytes += total_bytes;
+
+       if (cleaned_cnt)
+               gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
+
+       /* Update Last Free RxBD pointer for LFC */
+       if (unlikely(priv->tx_actual_en)) {
+               u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
+
+               gfar_write(rx_queue->rfbptr, bdp_dma);
+       }
 
        return howmany;
 }
@@ -3494,7 +3525,6 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
        struct phy_device *phydev = priv->phydev;
        struct gfar_priv_rx_q *rx_queue = NULL;
        int i;
-       struct rxbd8 *bdp;
 
        if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
                return;
@@ -3551,15 +3581,11 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
                /* Turn last free buffer recording on */
                if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
                        for (i = 0; i < priv->num_rx_queues; i++) {
+                               u32 bdp_dma;
+
                                rx_queue = priv->rx_queue[i];
-                               bdp = rx_queue->cur_rx;
-                               /* skip to previous bd */
-                               bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1,
-                                             rx_queue->rx_bd_base,
-                                             rx_queue->rx_ring_size);
-
-                               if (rx_queue->rfbptr)
-                                       gfar_write(rx_queue->rfbptr, (u32)bdp);
+                               bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
+                               gfar_write(rx_queue->rfbptr, bdp_dma);
                        }
 
                        priv->tx_actual_en = 1;
index daa1d37de6427b93a756843074c5d2e5c1961467..8c1994856e93823174d29c08ed8ae691c1530120 100644 (file)
@@ -71,11 +71,6 @@ struct ethtool_rx_list {
 /* Number of bytes to align the rx bufs to */
 #define RXBUF_ALIGNMENT 64
 
-/* The number of bytes which composes a unit for the purpose of
- * allocating data buffers.  ie-for any given MTU, the data buffer
- * will be the next highest multiple of 512 bytes. */
-#define INCREMENTAL_BUFFER_SIZE 512
-
 #define PHY_INIT_TIMEOUT 100000
 
 #define DRV_NAME "gfar-enet"
@@ -92,6 +87,8 @@ extern const char gfar_driver_version[];
 #define DEFAULT_TX_RING_SIZE   256
 #define DEFAULT_RX_RING_SIZE   256
 
+#define GFAR_RX_BUFF_ALLOC     16
+
 #define GFAR_RX_MAX_RING_SIZE   256
 #define GFAR_TX_MAX_RING_SIZE   256
 
@@ -103,11 +100,14 @@ extern const char gfar_driver_version[];
 #define DEFAULT_RX_LFC_THR  16
 #define DEFAULT_LFC_PTVVAL  4
 
-#define DEFAULT_RX_BUFFER_SIZE  1536
+#define GFAR_RXB_SIZE 1536
+#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \
+                         + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define GFAR_RXB_TRUESIZE 2048
+
 #define TX_RING_MOD_MASK(size) (size-1)
 #define RX_RING_MOD_MASK(size) (size-1)
-#define JUMBO_BUFFER_SIZE 9728
-#define JUMBO_FRAME_SIZE 9600
+#define GFAR_JUMBO_FRAME_SIZE 9600
 
 #define DEFAULT_FIFO_TX_THR 0x100
 #define DEFAULT_FIFO_TX_STARVE 0x40
@@ -640,6 +640,7 @@ struct rmon_mib
 };
 
 struct gfar_extra_stats {
+       atomic64_t rx_alloc_err;
        atomic64_t rx_large;
        atomic64_t rx_short;
        atomic64_t rx_nonoctet;
@@ -651,7 +652,6 @@ struct gfar_extra_stats {
        atomic64_t eberr;
        atomic64_t tx_babt;
        atomic64_t tx_underrun;
-       atomic64_t rx_skbmissing;
        atomic64_t tx_timeout;
 };
 
@@ -1012,34 +1012,42 @@ struct rx_q_stats {
        unsigned long rx_dropped;
 };
 
+struct gfar_rx_buff {
+       dma_addr_t dma;
+       struct page *page;
+       unsigned int page_offset;
+};
+
 /**
  *     struct gfar_priv_rx_q - per rx queue structure
- *     @rx_skbuff: skb pointers
- *     @skb_currx: currently use skb pointer
+ *     @rx_buff: Array of buffer info metadata structs
  *     @rx_bd_base: First rx buffer descriptor
- *     @cur_rx: Next free rx ring entry
+ *     @next_to_use: index of the next buffer to be alloc'd
+ *     @next_to_clean: index of the next buffer to be cleaned
  *     @qindex: index of this queue
- *     @dev: back pointer to the dev structure
+ *     @ndev: back pointer to net_device
  *     @rx_ring_size: Rx ring size
  *     @rxcoalescing: enable/disable rx-coalescing
 *     @rxic: receive interrupt coalescing value
  */
 
 struct gfar_priv_rx_q {
-       struct  sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES);
-       dma_addr_t rx_bd_dma_base;
+       struct  gfar_rx_buff *rx_buff __aligned(SMP_CACHE_BYTES);
        struct  rxbd8 *rx_bd_base;
-       struct  rxbd8 *cur_rx;
-       struct  net_device *dev;
-       struct gfar_priv_grp *grp;
+       struct  net_device *ndev;
+       struct  device *dev;
+       u16 rx_ring_size;
+       u16 qindex;
+       struct  gfar_priv_grp *grp;
+       u16 next_to_clean;
+       u16 next_to_use;
+       u16 next_to_alloc;
+       struct  sk_buff *skb;
        struct rx_q_stats stats;
-       u16     skb_currx;
-       u16     qindex;
-       unsigned int    rx_ring_size;
-       /* RX Coalescing values */
+       u32 __iomem *rfbptr;
        unsigned char rxcoalescing;
        unsigned long rxic;
-       u32 __iomem *rfbptr;
+       dma_addr_t rx_bd_dma_base;
 };
 
 enum gfar_irqinfo_id {
@@ -1109,7 +1117,6 @@ struct gfar_private {
        struct device *dev;
        struct net_device *ndev;
        enum gfar_errata errata;
-       unsigned int rx_buffer_size;
 
        u16 uses_rxfcb;
        u16 padding;
@@ -1145,9 +1152,6 @@ struct gfar_private {
        int oldduplex;
        int oldlink;
 
-       /* Bitfield update lock */
-       spinlock_t bflock;
-
        uint32_t msg_enable;
 
        struct work_struct reset_task;
@@ -1295,6 +1299,28 @@ static inline void gfar_clear_txbd_status(struct txbd8 *bdp)
        bdp->lstatus = cpu_to_be32(lstatus);
 }
 
+static inline int gfar_rxbd_unused(struct gfar_priv_rx_q *rxq)
+{
+       if (rxq->next_to_clean > rxq->next_to_use)
+               return rxq->next_to_clean - rxq->next_to_use - 1;
+
+       return rxq->rx_ring_size + rxq->next_to_clean - rxq->next_to_use - 1;
+}
+
+static inline u32 gfar_rxbd_dma_lastfree(struct gfar_priv_rx_q *rxq)
+{
+       struct rxbd8 *bdp;
+       u32 bdp_dma;
+       int i;
+
+       i = rxq->next_to_use ? rxq->next_to_use - 1 : rxq->rx_ring_size - 1;
+       bdp = &rxq->rx_bd_base[i];
+       bdp_dma = lower_32_bits(rxq->rx_bd_dma_base);
+       bdp_dma += (uintptr_t)bdp - (uintptr_t)rxq->rx_bd_base;
+
+       return bdp_dma;
+}
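
A worked example for the two helpers above, assuming rx_ring_size = 256: with next_to_clean = 10 and next_to_use = 200, gfar_rxbd_unused() returns 256 + 10 - 200 - 1 = 65; with next_to_clean = 200 and next_to_use = 10 it returns 200 - 10 - 1 = 189. The "- 1" keeps one descriptor permanently unused, so next_to_clean == next_to_use can only mean an empty ring, never a full one. gfar_rxbd_dma_lastfree() turns the descriptor just before next_to_use into a bus address by adding its byte offset within the ring to the DMA base of the ring.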
+
 irqreturn_t gfar_receive(int irq, void *dev_id);
 int startup_gfar(struct net_device *dev);
 void stop_gfar(struct net_device *dev);
index fda12fb32ec77a8538a0f1d1370d2e653c91856c..555e461b0cfe272e9944f594e2a584faef242f03 100644 (file)
@@ -61,6 +61,8 @@ static void gfar_gdrvinfo(struct net_device *dev,
                          struct ethtool_drvinfo *drvinfo);
 
 static const char stat_gstrings[][ETH_GSTRING_LEN] = {
+       /* extra stats */
+       "rx-allocation-errors",
        "rx-large-frame-errors",
        "rx-short-frame-errors",
        "rx-non-octet-errors",
@@ -72,8 +74,8 @@ static const char stat_gstrings[][ETH_GSTRING_LEN] = {
        "ethernet-bus-error",
        "tx-babbling-errors",
        "tx-underrun-errors",
-       "rx-skb-missing-errors",
        "tx-timeout-errors",
+       /* rmon stats */
        "tx-rx-64-frames",
        "tx-rx-65-127-frames",
        "tx-rx-128-255-frames",
@@ -653,7 +655,6 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
        struct gfar_private *priv = netdev_priv(dev);
-       unsigned long flags;
 
        if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
            wol->wolopts != 0)
@@ -664,9 +665,7 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 
        device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
 
-       spin_lock_irqsave(&priv->bflock, flags);
-       priv->wol_en =  !!device_may_wakeup(&dev->dev);
-       spin_unlock_irqrestore(&priv->bflock, flags);
+       priv->wol_en = !!device_may_wakeup(&dev->dev);
 
        return 0;
 }
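
With the bflock serialization removed, wol_en becomes a plain flag written only from this ethtool path and read at suspend time. Magic-packet wake is still toggled the usual way from userspace, e.g. (interface name is illustrative):

        ethtool -s eth0 wol g   # enable magic-packet wake-on-LAN
        ethtool eth0            # "Wake-on: g" confirms the setting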
index d49bee38cd319a0a8c7afd2cad7f1cb1ac7f2ed3..cc2d8b4b18e3e2a99ef303b76809545496089787 100644 (file)
@@ -965,7 +965,6 @@ static struct platform_driver hip04_mac_driver = {
        .remove = hip04_remove,
        .driver = {
                .name           = DRV_NAME,
-               .owner          = THIS_MODULE,
                .of_match_table = hip04_mac_match,
        },
 };
index b3bac25db99cf59ed1cdd2e990962cf129297846..fca0a5be1f0f732cd340dd056e6be86b2fb0a925 100644 (file)
@@ -174,7 +174,6 @@ static struct platform_driver hip04_mdio_driver = {
        .remove = hip04_mdio_remove,
        .driver = {
                .name = "hip04-mdio",
-               .owner = THIS_MODULE,
                .of_match_table = hip04_mdio_match,
        },
 };
index 29bbb628d712b38e8b17529626b85c3532d7942f..7af870a3c549592803a55a5cfea89ea1b15044d3 100644 (file)
@@ -79,6 +79,11 @@ static unsigned int rx_flush __read_mostly = 0;
 module_param(rx_flush, uint, 0644);
 MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
 
+static bool old_large_send __read_mostly;
+module_param(old_large_send, bool, S_IRUGO);
+MODULE_PARM_DESC(old_large_send,
+       "Use old large send method on firmware that supports the new method");
+
 struct ibmveth_stat {
        char name[ETH_GSTRING_LEN];
        int offset;
@@ -101,7 +106,8 @@ struct ibmveth_stat ibmveth_stats[] = {
        { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
        { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
        { "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
-       { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) }
+       { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
+       { "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
 };
 
 /* simple methods of getting data from the current rxq entry */
@@ -848,25 +854,91 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
        return rc1 ? rc1 : rc2;
 }
 
+static int ibmveth_set_tso(struct net_device *dev, u32 data)
+{
+       struct ibmveth_adapter *adapter = netdev_priv(dev);
+       unsigned long set_attr, clr_attr, ret_attr;
+       long ret1, ret2;
+       int rc1 = 0, rc2 = 0;
+       int restart = 0;
+
+       if (netif_running(dev)) {
+               restart = 1;
+               adapter->pool_config = 1;
+               ibmveth_close(dev);
+               adapter->pool_config = 0;
+       }
+
+       set_attr = 0;
+       clr_attr = 0;
+
+       if (data)
+               set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
+       else
+               clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
+
+       ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
+
+       if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
+           !old_large_send) {
+               ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
+                                         set_attr, &ret_attr);
+
+               if (ret2 != H_SUCCESS) {
+                       netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
+                                  data, ret2);
+
+                       h_illan_attributes(adapter->vdev->unit_address,
+                                          set_attr, clr_attr, &ret_attr);
+
+                       if (data == 1)
+                               dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+                       rc1 = -EIO;
+
+               } else {
+                       adapter->fw_large_send_support = data;
+                       adapter->large_send = data;
+               }
+       } else {
+               /* Older firmware versions of large send offload do not
+                * support tcp6/ipv6
+                */
+               if (data == 1) {
+                       dev->features &= ~NETIF_F_TSO6;
+                       netdev_info(dev, "TSO feature requires all partitions to have updated driver");
+               }
+               adapter->large_send = data;
+       }
+
+       if (restart)
+               rc2 = ibmveth_open(dev);
+
+       return rc1 ? rc1 : rc2;
+}
+
 static int ibmveth_set_features(struct net_device *dev,
        netdev_features_t features)
 {
        struct ibmveth_adapter *adapter = netdev_priv(dev);
        int rx_csum = !!(features & NETIF_F_RXCSUM);
-       int rc;
-       netdev_features_t changed = features ^ dev->features;
-
-       if (features & NETIF_F_TSO & changed)
-               netdev_info(dev, "TSO feature requires all partitions to have updated driver");
+       int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
+       int rc1 = 0, rc2 = 0;
 
-       if (rx_csum == adapter->rx_csum)
-               return 0;
+       if (rx_csum != adapter->rx_csum) {
+               rc1 = ibmveth_set_csum_offload(dev, rx_csum);
+               if (rc1 && !adapter->rx_csum)
+                       dev->features =
+                               features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
+       }
 
-       rc = ibmveth_set_csum_offload(dev, rx_csum);
-       if (rc && !adapter->rx_csum)
-               dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
+       if (large_send != adapter->large_send) {
+               rc2 = ibmveth_set_tso(dev, large_send);
+               if (rc2 && !adapter->large_send)
+                       dev->features =
+                               features & ~(NETIF_F_TSO | NETIF_F_TSO6);
+       }
 
-       return rc;
+       return rc1 ? rc1 : rc2;
 }
 
 static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -917,7 +989,7 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
 
 static int ibmveth_send(struct ibmveth_adapter *adapter,
-                       union ibmveth_buf_desc *descs)
+                       union ibmveth_buf_desc *descs, unsigned long mss)
 {
        unsigned long correlator;
        unsigned int retry_count;
@@ -934,7 +1006,8 @@ static int ibmveth_send(struct ibmveth_adapter *adapter,
                                             descs[0].desc, descs[1].desc,
                                             descs[2].desc, descs[3].desc,
                                             descs[4].desc, descs[5].desc,
-                                            correlator, &correlator);
+                                            correlator, &correlator, mss,
+                                            adapter->fw_large_send_support);
        } while ((ret == H_BUSY) && (retry_count--));
 
        if (ret != H_SUCCESS && ret != H_DROPPED) {
@@ -955,6 +1028,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
        int last, i;
        int force_bounce = 0;
        dma_addr_t dma_addr;
+       unsigned long mss = 0;
 
        /*
         * veth handles a maximum of 6 segments including the header, so
@@ -980,6 +1054,9 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
 
        desc_flags = IBMVETH_BUF_VALID;
 
+       if (skb_is_gso(skb) && adapter->fw_large_send_support)
+               desc_flags |= IBMVETH_BUF_LRG_SND;
+
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                unsigned char *buf = skb_transport_header(skb) +
                                                skb->csum_offset;
@@ -1007,7 +1084,7 @@ retry_bounce:
                descs[0].fields.flags_len = desc_flags | skb->len;
                descs[0].fields.address = adapter->bounce_buffer_dma;
 
-               if (ibmveth_send(adapter, descs)) {
+               if (ibmveth_send(adapter, descs, 0)) {
                        adapter->tx_send_failed++;
                        netdev->stats.tx_dropped++;
                } else {
@@ -1041,16 +1118,23 @@ retry_bounce:
                descs[i+1].fields.address = dma_addr;
        }
 
-       if (skb_is_gso(skb) && !skb_is_gso_v6(skb)) {
-               /* Put -1 in the IP checksum to tell phyp it
-                *  is a largesend packet and put the mss in the TCP checksum.
-                */
-               ip_hdr(skb)->check = 0xffff;
-               tcp_hdr(skb)->check = cpu_to_be16(skb_shinfo(skb)->gso_size);
-               adapter->tx_large_packets++;
+       if (skb_is_gso(skb)) {
+               if (adapter->fw_large_send_support) {
+                       mss = (unsigned long)skb_shinfo(skb)->gso_size;
+                       adapter->tx_large_packets++;
+               } else if (!skb_is_gso_v6(skb)) {
+                       /* Put -1 in the IP checksum to tell phyp it
+                        * is a largesend packet. Put the mss in
+                        * the TCP checksum.
+                        */
+                       ip_hdr(skb)->check = 0xffff;
+                       tcp_hdr(skb)->check =
+                               cpu_to_be16(skb_shinfo(skb)->gso_size);
+                       adapter->tx_large_packets++;
+               }
        }
 
-       if (ibmveth_send(adapter, descs)) {
+       if (ibmveth_send(adapter, descs, mss)) {
                adapter->tx_send_failed++;
                netdev->stats.tx_dropped++;
        } else {
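The two large-send paths in the GSO hunk above reduce to the helper below. This is a restating sketch, not code from the patch; ibmveth_setup_largesend() is a hypothetical name:

	/* New firmware takes the MSS as an hcall argument; old firmware
	 * learns of a largesend frame from in-band header markers. */
	static unsigned long ibmveth_setup_largesend(struct ibmveth_adapter *adapter,
						     struct sk_buff *skb)
	{
		if (!skb_is_gso(skb))
			return 0;

		if (adapter->fw_large_send_support) {
			adapter->tx_large_packets++;
			return (unsigned long)skb_shinfo(skb)->gso_size;
		}

		if (!skb_is_gso_v6(skb)) {
			/* -1 in the IP checksum marks a largesend packet;
			 * the MSS travels in the TCP checksum field. */
			ip_hdr(skb)->check = 0xffff;
			tcp_hdr(skb)->check =
				cpu_to_be16(skb_shinfo(skb)->gso_size);
			adapter->tx_large_packets++;
		}
		return 0;
	}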
@@ -1401,6 +1485,8 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
        struct ibmveth_adapter *adapter;
        unsigned char *mac_addr_p;
        unsigned int *mcastFilterSize_p;
+       long ret;
+       unsigned long ret_attr;
 
        dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
                dev->unit_address);
@@ -1449,10 +1535,19 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
        SET_NETDEV_DEV(netdev, &dev->dev);
        netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
        netdev->features |= netdev->hw_features;
 
-       /* TSO is disabled by default */
-       netdev->hw_features |= NETIF_F_TSO;
+       ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
+
+       /* If running older firmware, TSO should not be enabled by default */
+       if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
+           !old_large_send) {
+               netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+               netdev->features |= netdev->hw_features;
+       } else {
+               netdev->hw_features |= NETIF_F_TSO;
+       }
 
        memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
 
index 41dedb1fb2ae7403d89f3feed38b4f87197de067..4eade67fe30c32a0c528631b27ab9042bd1d4caf 100644
@@ -40,6 +40,8 @@
 #define IbmVethMcastRemoveFilter     0x2UL
 #define IbmVethMcastClearFilterTable 0x3UL
 
+#define IBMVETH_ILLAN_LRG_SR_ENABLED   0x0000000000010000UL
+#define IBMVETH_ILLAN_LRG_SND_SUPPORT  0x0000000000008000UL
 #define IBMVETH_ILLAN_PADDED_PKT_CSUM  0x0000000000002000UL
 #define IBMVETH_ILLAN_TRUNK_PRI_MASK   0x0000000000000F00UL
 #define IBMVETH_ILLAN_IPV6_TCP_CSUM            0x0000000000000004UL
 static inline long h_send_logical_lan(unsigned long unit_address,
                unsigned long desc1, unsigned long desc2, unsigned long desc3,
                unsigned long desc4, unsigned long desc5, unsigned long desc6,
-               unsigned long corellator_in, unsigned long *corellator_out)
+               unsigned long corellator_in, unsigned long *corellator_out,
+               unsigned long mss, unsigned long large_send_support)
 {
        long rc;
        unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
 
-       rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, desc1,
-                       desc2, desc3, desc4, desc5, desc6, corellator_in);
+       if (large_send_support)
+               rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
+                                 desc1, desc2, desc3, desc4, desc5, desc6,
+                                 corellator_in, mss);
+       else
+               rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
+                                 desc1, desc2, desc3, desc4, desc5, desc6,
+                                 corellator_in);
 
        *corellator_out = retbuf[0];
 
@@ -147,11 +156,13 @@ struct ibmveth_adapter {
     struct ibmveth_rx_q rx_queue;
     int pool_config;
     int rx_csum;
+    int large_send;
     void *bounce_buffer;
     dma_addr_t bounce_buffer_dma;
 
     u64 fw_ipv6_csum_support;
     u64 fw_ipv4_csum_support;
+    u64 fw_large_send_support;
     /* adapter specific stats */
     u64 replenish_task_cycles;
     u64 replenish_no_mem;
@@ -182,6 +193,7 @@ struct ibmveth_buf_desc_fields {
 #endif
 #define IBMVETH_BUF_VALID      0x80000000
 #define IBMVETH_BUF_TOGGLE     0x40000000
+#define IBMVETH_BUF_LRG_SND     0x04000000
 #define IBMVETH_BUF_NO_CSUM    0x02000000
 #define IBMVETH_BUF_CSUM_GOOD  0x01000000
 #define IBMVETH_BUF_LEN_MASK   0x00FFFFFF
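The buffer-descriptor flags above all occupy the high byte because flags and length share one 32-bit word; the transmit path packs them roughly as:

	/* Sketch: length fills the low 24 bits (IBMVETH_BUF_LEN_MASK),
	 * leaving the high bits for VALID/LRG_SND/CSUM flags. */
	u32 flags_len = IBMVETH_BUF_VALID | IBMVETH_BUF_LRG_SND |
			(skb->len & IBMVETH_BUF_LEN_MASK);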
index 89d788d8f263e5c362c10166dc76fa59f517e12c..fea1601f32a3614122a75e03fd98e786cab2ac2a 100644
@@ -4588,6 +4588,7 @@ static int e1000_open(struct net_device *netdev)
        return 0;
 
 err_req_irq:
+       pm_qos_remove_request(&adapter->pm_qos_req);
        e1000e_release_hw_control(adapter);
        e1000_power_down_phy(adapter);
        e1000e_free_rx_resources(adapter->rx_ring);
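The one-line e1000e fix balances the pm_qos_add_request() made earlier in e1000_open(), which this error path previously leaked. The call being undone is assumed to look roughly like:

	pm_qos_add_request(&adapter->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);	/* assumed shape */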
index ec76c3fa3a041158dcb5c21872afd5dd8352b9aa..281fd8456146190427a0390cedd3cfb2806d31af 100644
@@ -98,7 +98,7 @@
 #define I40E_INT_NAME_STR_LEN        (IFNAMSIZ + 9)
 
 /* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_NPAR_FLAG      (1 << 0)
+#define I40E_PRIV_FLAGS_NPAR_FLAG      BIT(0)
 
 #define I40E_NVM_VERSION_LO_SHIFT  0
 #define I40E_NVM_VERSION_LO_MASK   (0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -289,35 +289,35 @@ struct i40e_pf {
        struct work_struct service_task;
 
        u64 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED              (u64)(1 << 1)
-#define I40E_FLAG_MSI_ENABLED                  (u64)(1 << 2)
-#define I40E_FLAG_MSIX_ENABLED                 (u64)(1 << 3)
-#define I40E_FLAG_RX_1BUF_ENABLED              (u64)(1 << 4)
-#define I40E_FLAG_RX_PS_ENABLED                (u64)(1 << 5)
-#define I40E_FLAG_RSS_ENABLED                  (u64)(1 << 6)
-#define I40E_FLAG_VMDQ_ENABLED                 (u64)(1 << 7)
-#define I40E_FLAG_FDIR_REQUIRES_REINIT         (u64)(1 << 8)
-#define I40E_FLAG_NEED_LINK_UPDATE             (u64)(1 << 9)
+#define I40E_FLAG_RX_CSUM_ENABLED              BIT_ULL(1)
+#define I40E_FLAG_MSI_ENABLED                  BIT_ULL(2)
+#define I40E_FLAG_MSIX_ENABLED                 BIT_ULL(3)
+#define I40E_FLAG_RX_1BUF_ENABLED              BIT_ULL(4)
+#define I40E_FLAG_RX_PS_ENABLED                        BIT_ULL(5)
+#define I40E_FLAG_RSS_ENABLED                  BIT_ULL(6)
+#define I40E_FLAG_VMDQ_ENABLED                 BIT_ULL(7)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT         BIT_ULL(8)
+#define I40E_FLAG_NEED_LINK_UPDATE             BIT_ULL(9)
 #ifdef I40E_FCOE
-#define I40E_FLAG_FCOE_ENABLED                 (u64)(1 << 11)
+#define I40E_FLAG_FCOE_ENABLED                 BIT_ULL(11)
 #endif /* I40E_FCOE */
-#define I40E_FLAG_IN_NETPOLL                   (u64)(1 << 12)
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED       (u64)(1 << 13)
-#define I40E_FLAG_CLEAN_ADMINQ                 (u64)(1 << 14)
-#define I40E_FLAG_FILTER_SYNC                  (u64)(1 << 15)
-#define I40E_FLAG_PROCESS_MDD_EVENT            (u64)(1 << 17)
-#define I40E_FLAG_PROCESS_VFLR_EVENT           (u64)(1 << 18)
-#define I40E_FLAG_SRIOV_ENABLED                (u64)(1 << 19)
-#define I40E_FLAG_DCB_ENABLED                  (u64)(1 << 20)
-#define I40E_FLAG_FD_SB_ENABLED                (u64)(1 << 21)
-#define I40E_FLAG_FD_ATR_ENABLED               (u64)(1 << 22)
-#define I40E_FLAG_PTP                          (u64)(1 << 25)
-#define I40E_FLAG_MFP_ENABLED                  (u64)(1 << 26)
+#define I40E_FLAG_IN_NETPOLL                   BIT_ULL(12)
+#define I40E_FLAG_16BYTE_RX_DESC_ENABLED       BIT_ULL(13)
+#define I40E_FLAG_CLEAN_ADMINQ                 BIT_ULL(14)
+#define I40E_FLAG_FILTER_SYNC                  BIT_ULL(15)
+#define I40E_FLAG_PROCESS_MDD_EVENT            BIT_ULL(17)
+#define I40E_FLAG_PROCESS_VFLR_EVENT           BIT_ULL(18)
+#define I40E_FLAG_SRIOV_ENABLED                        BIT_ULL(19)
+#define I40E_FLAG_DCB_ENABLED                  BIT_ULL(20)
+#define I40E_FLAG_FD_SB_ENABLED                        BIT_ULL(21)
+#define I40E_FLAG_FD_ATR_ENABLED               BIT_ULL(22)
+#define I40E_FLAG_PTP                          BIT_ULL(25)
+#define I40E_FLAG_MFP_ENABLED                  BIT_ULL(26)
 #ifdef CONFIG_I40E_VXLAN
-#define I40E_FLAG_VXLAN_FILTER_SYNC            (u64)(1 << 27)
+#define I40E_FLAG_VXLAN_FILTER_SYNC            BIT_ULL(27)
 #endif
-#define I40E_FLAG_PORT_ID_VALID                (u64)(1 << 28)
-#define I40E_FLAG_DCB_CAPABLE                  (u64)(1 << 29)
+#define I40E_FLAG_PORT_ID_VALID                        BIT_ULL(28)
+#define I40E_FLAG_DCB_CAPABLE                  BIT_ULL(29)
 #define I40E_FLAG_VEB_MODE_ENABLED             BIT_ULL(40)
 
        /* tracks features that get auto disabled by errors */
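The point of the BIT_ULL() conversion above is type width: (u64)(1 << n) shifts a 32-bit int first and casts only afterwards, which is undefined once n reaches 31 and cannot express bits like I40E_FLAG_VEB_MODE_ENABLED at position 40. The kernel macros widen before shifting; paraphrasing include/linux/bitops.h:

	#define BIT(nr)		(1UL << (nr))	/* unsigned long */
	#define BIT_ULL(nr)	(1ULL << (nr))	/* 64-bit, safe for nr >= 32 */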
@@ -443,8 +443,8 @@ struct i40e_vsi {
 
        u32 current_netdev_flags;
        unsigned long state;
-#define I40E_VSI_FLAG_FILTER_CHANGED  (1<<0)
-#define I40E_VSI_FLAG_VEB_OWNER       (1<<1)
+#define I40E_VSI_FLAG_FILTER_CHANGED   BIT(0)
+#define I40E_VSI_FLAG_VEB_OWNER                BIT(1)
        unsigned long flags;
 
        struct list_head mac_filter_list;
index 929e3d72a01e5aa6901787fa708f046198af0a22..9101f5c00f37104dd993049e6a34e9103d5a627a 100644
@@ -34,7 +34,7 @@
  */
 
 #define I40E_FW_API_VERSION_MAJOR      0x0001
-#define I40E_FW_API_VERSION_MINOR      0x0002
+#define I40E_FW_API_VERSION_MINOR      0x0004
 
 struct i40e_aq_desc {
        __le16 flags;
@@ -132,12 +132,7 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_list_func_capabilities     = 0x000A,
        i40e_aqc_opc_list_dev_capabilities      = 0x000B,
 
-       i40e_aqc_opc_set_cppm_configuration     = 0x0103,
-       i40e_aqc_opc_set_arp_proxy_entry        = 0x0104,
-       i40e_aqc_opc_set_ns_proxy_entry         = 0x0105,
-
        /* LAA */
-       i40e_aqc_opc_mng_laa            = 0x0106,   /* AQ obsolete */
        i40e_aqc_opc_mac_address_read   = 0x0107,
        i40e_aqc_opc_mac_address_write  = 0x0108,
 
@@ -262,7 +257,6 @@ enum i40e_admin_queue_opc {
        /* Tunnel commands */
        i40e_aqc_opc_add_udp_tunnel     = 0x0B00,
        i40e_aqc_opc_del_udp_tunnel     = 0x0B01,
-       i40e_aqc_opc_tunnel_key_structure       = 0x0B10,
 
        /* Async Events */
        i40e_aqc_opc_event_lan_overflow         = 0x1001,
@@ -274,8 +268,6 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_oem_ocbb_initialize        = 0xFE03,
 
        /* debug commands */
-       i40e_aqc_opc_debug_get_deviceid         = 0xFF00,
-       i40e_aqc_opc_debug_set_mode             = 0xFF01,
        i40e_aqc_opc_debug_read_reg             = 0xFF03,
        i40e_aqc_opc_debug_write_reg            = 0xFF04,
        i40e_aqc_opc_debug_modify_reg           = 0xFF07,
@@ -509,7 +501,8 @@ struct i40e_aqc_mac_address_read {
 #define I40E_AQC_SAN_ADDR_VALID                0x20
 #define I40E_AQC_PORT_ADDR_VALID       0x40
 #define I40E_AQC_WOL_ADDR_VALID                0x80
-#define I40E_AQC_ADDR_VALID_MASK       0xf0
+#define I40E_AQC_MC_MAG_EN_VALID       0x100
+#define I40E_AQC_ADDR_VALID_MASK       0x1F0
        u8      reserved[6];
        __le32  addr_high;
        __le32  addr_low;
@@ -532,7 +525,9 @@ struct i40e_aqc_mac_address_write {
 #define I40E_AQC_WRITE_TYPE_LAA_ONLY   0x0000
 #define I40E_AQC_WRITE_TYPE_LAA_WOL    0x4000
 #define I40E_AQC_WRITE_TYPE_PORT       0x8000
-#define I40E_AQC_WRITE_TYPE_MASK       0xc000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG      0xC000
+#define I40E_AQC_WRITE_TYPE_MASK       0xC000
+
        __le16  mac_sah;
        __le32  mac_sal;
        u8      reserved[8];
@@ -1068,6 +1063,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
        __le16  seid;
 #define I40E_AQC_VSI_PROM_CMD_SEID_MASK                0x3FF
        __le16  vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_MASK             0x0FFF
 #define I40E_AQC_SET_VSI_VLAN_VALID            0x8000
        u8      reserved[8];
 };
@@ -2064,6 +2060,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
 #define I40E_AQC_CEE_PFC_STATUS_MASK   (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
 #define I40E_AQC_CEE_APP_STATUS_SHIFT  0x8
 #define I40E_AQC_CEE_APP_STATUS_MASK   (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
+#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_FCOE_STATUS_MASK  (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT)
+#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT        0xA
+#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT)
+#define I40E_AQC_CEE_FIP_STATUS_SHIFT  0x10
+#define I40E_AQC_CEE_FIP_STATUS_MASK   (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT)
 struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
        u8      reserved1;
        u8      oper_num_tc;
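Each new CEE status field is three bits wide at its shift, so reading one back is the usual mask-then-shift; a sketch, where tlv_status is a hypothetical variable holding the status word:

	u32 fcoe_status = (tlv_status & I40E_AQC_CEE_FCOE_STATUS_MASK) >>
			  I40E_AQC_CEE_FCOE_STATUS_SHIFT;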
index 0bae22da014db05d9cd7a56d5a9d21902e49ca41..167ca0d752ea8065c04a731395029d40eb90a888 100644
@@ -71,6 +71,212 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
        return status;
 }
 
+/**
+ * i40e_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+       switch (aq_err) {
+       case I40E_AQ_RC_OK:
+               return "OK";
+       case I40E_AQ_RC_EPERM:
+               return "I40E_AQ_RC_EPERM";
+       case I40E_AQ_RC_ENOENT:
+               return "I40E_AQ_RC_ENOENT";
+       case I40E_AQ_RC_ESRCH:
+               return "I40E_AQ_RC_ESRCH";
+       case I40E_AQ_RC_EINTR:
+               return "I40E_AQ_RC_EINTR";
+       case I40E_AQ_RC_EIO:
+               return "I40E_AQ_RC_EIO";
+       case I40E_AQ_RC_ENXIO:
+               return "I40E_AQ_RC_ENXIO";
+       case I40E_AQ_RC_E2BIG:
+               return "I40E_AQ_RC_E2BIG";
+       case I40E_AQ_RC_EAGAIN:
+               return "I40E_AQ_RC_EAGAIN";
+       case I40E_AQ_RC_ENOMEM:
+               return "I40E_AQ_RC_ENOMEM";
+       case I40E_AQ_RC_EACCES:
+               return "I40E_AQ_RC_EACCES";
+       case I40E_AQ_RC_EFAULT:
+               return "I40E_AQ_RC_EFAULT";
+       case I40E_AQ_RC_EBUSY:
+               return "I40E_AQ_RC_EBUSY";
+       case I40E_AQ_RC_EEXIST:
+               return "I40E_AQ_RC_EEXIST";
+       case I40E_AQ_RC_EINVAL:
+               return "I40E_AQ_RC_EINVAL";
+       case I40E_AQ_RC_ENOTTY:
+               return "I40E_AQ_RC_ENOTTY";
+       case I40E_AQ_RC_ENOSPC:
+               return "I40E_AQ_RC_ENOSPC";
+       case I40E_AQ_RC_ENOSYS:
+               return "I40E_AQ_RC_ENOSYS";
+       case I40E_AQ_RC_ERANGE:
+               return "I40E_AQ_RC_ERANGE";
+       case I40E_AQ_RC_EFLUSHED:
+               return "I40E_AQ_RC_EFLUSHED";
+       case I40E_AQ_RC_BAD_ADDR:
+               return "I40E_AQ_RC_BAD_ADDR";
+       case I40E_AQ_RC_EMODE:
+               return "I40E_AQ_RC_EMODE";
+       case I40E_AQ_RC_EFBIG:
+               return "I40E_AQ_RC_EFBIG";
+       }
+
+       snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+       return hw->err_str;
+}
+
+/**
+ * i40e_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+{
+       switch (stat_err) {
+       case 0:
+               return "OK";
+       case I40E_ERR_NVM:
+               return "I40E_ERR_NVM";
+       case I40E_ERR_NVM_CHECKSUM:
+               return "I40E_ERR_NVM_CHECKSUM";
+       case I40E_ERR_PHY:
+               return "I40E_ERR_PHY";
+       case I40E_ERR_CONFIG:
+               return "I40E_ERR_CONFIG";
+       case I40E_ERR_PARAM:
+               return "I40E_ERR_PARAM";
+       case I40E_ERR_MAC_TYPE:
+               return "I40E_ERR_MAC_TYPE";
+       case I40E_ERR_UNKNOWN_PHY:
+               return "I40E_ERR_UNKNOWN_PHY";
+       case I40E_ERR_LINK_SETUP:
+               return "I40E_ERR_LINK_SETUP";
+       case I40E_ERR_ADAPTER_STOPPED:
+               return "I40E_ERR_ADAPTER_STOPPED";
+       case I40E_ERR_INVALID_MAC_ADDR:
+               return "I40E_ERR_INVALID_MAC_ADDR";
+       case I40E_ERR_DEVICE_NOT_SUPPORTED:
+               return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+       case I40E_ERR_MASTER_REQUESTS_PENDING:
+               return "I40E_ERR_MASTER_REQUESTS_PENDING";
+       case I40E_ERR_INVALID_LINK_SETTINGS:
+               return "I40E_ERR_INVALID_LINK_SETTINGS";
+       case I40E_ERR_AUTONEG_NOT_COMPLETE:
+               return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+       case I40E_ERR_RESET_FAILED:
+               return "I40E_ERR_RESET_FAILED";
+       case I40E_ERR_SWFW_SYNC:
+               return "I40E_ERR_SWFW_SYNC";
+       case I40E_ERR_NO_AVAILABLE_VSI:
+               return "I40E_ERR_NO_AVAILABLE_VSI";
+       case I40E_ERR_NO_MEMORY:
+               return "I40E_ERR_NO_MEMORY";
+       case I40E_ERR_BAD_PTR:
+               return "I40E_ERR_BAD_PTR";
+       case I40E_ERR_RING_FULL:
+               return "I40E_ERR_RING_FULL";
+       case I40E_ERR_INVALID_PD_ID:
+               return "I40E_ERR_INVALID_PD_ID";
+       case I40E_ERR_INVALID_QP_ID:
+               return "I40E_ERR_INVALID_QP_ID";
+       case I40E_ERR_INVALID_CQ_ID:
+               return "I40E_ERR_INVALID_CQ_ID";
+       case I40E_ERR_INVALID_CEQ_ID:
+               return "I40E_ERR_INVALID_CEQ_ID";
+       case I40E_ERR_INVALID_AEQ_ID:
+               return "I40E_ERR_INVALID_AEQ_ID";
+       case I40E_ERR_INVALID_SIZE:
+               return "I40E_ERR_INVALID_SIZE";
+       case I40E_ERR_INVALID_ARP_INDEX:
+               return "I40E_ERR_INVALID_ARP_INDEX";
+       case I40E_ERR_INVALID_FPM_FUNC_ID:
+               return "I40E_ERR_INVALID_FPM_FUNC_ID";
+       case I40E_ERR_QP_INVALID_MSG_SIZE:
+               return "I40E_ERR_QP_INVALID_MSG_SIZE";
+       case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+               return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+       case I40E_ERR_INVALID_FRAG_COUNT:
+               return "I40E_ERR_INVALID_FRAG_COUNT";
+       case I40E_ERR_QUEUE_EMPTY:
+               return "I40E_ERR_QUEUE_EMPTY";
+       case I40E_ERR_INVALID_ALIGNMENT:
+               return "I40E_ERR_INVALID_ALIGNMENT";
+       case I40E_ERR_FLUSHED_QUEUE:
+               return "I40E_ERR_FLUSHED_QUEUE";
+       case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+               return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+       case I40E_ERR_INVALID_IMM_DATA_SIZE:
+               return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+       case I40E_ERR_TIMEOUT:
+               return "I40E_ERR_TIMEOUT";
+       case I40E_ERR_OPCODE_MISMATCH:
+               return "I40E_ERR_OPCODE_MISMATCH";
+       case I40E_ERR_CQP_COMPL_ERROR:
+               return "I40E_ERR_CQP_COMPL_ERROR";
+       case I40E_ERR_INVALID_VF_ID:
+               return "I40E_ERR_INVALID_VF_ID";
+       case I40E_ERR_INVALID_HMCFN_ID:
+               return "I40E_ERR_INVALID_HMCFN_ID";
+       case I40E_ERR_BACKING_PAGE_ERROR:
+               return "I40E_ERR_BACKING_PAGE_ERROR";
+       case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+               return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+       case I40E_ERR_INVALID_PBLE_INDEX:
+               return "I40E_ERR_INVALID_PBLE_INDEX";
+       case I40E_ERR_INVALID_SD_INDEX:
+               return "I40E_ERR_INVALID_SD_INDEX";
+       case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+               return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+       case I40E_ERR_INVALID_SD_TYPE:
+               return "I40E_ERR_INVALID_SD_TYPE";
+       case I40E_ERR_MEMCPY_FAILED:
+               return "I40E_ERR_MEMCPY_FAILED";
+       case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+               return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+       case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+               return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+       case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+               return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+       case I40E_ERR_SRQ_ENABLED:
+               return "I40E_ERR_SRQ_ENABLED";
+       case I40E_ERR_ADMIN_QUEUE_ERROR:
+               return "I40E_ERR_ADMIN_QUEUE_ERROR";
+       case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+               return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+       case I40E_ERR_BUF_TOO_SHORT:
+               return "I40E_ERR_BUF_TOO_SHORT";
+       case I40E_ERR_ADMIN_QUEUE_FULL:
+               return "I40E_ERR_ADMIN_QUEUE_FULL";
+       case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+               return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+       case I40E_ERR_BAD_IWARP_CQE:
+               return "I40E_ERR_BAD_IWARP_CQE";
+       case I40E_ERR_NVM_BLANK_MODE:
+               return "I40E_ERR_NVM_BLANK_MODE";
+       case I40E_ERR_NOT_IMPLEMENTED:
+               return "I40E_ERR_NOT_IMPLEMENTED";
+       case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+               return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+       case I40E_ERR_DIAG_TEST_FAILED:
+               return "I40E_ERR_DIAG_TEST_FAILED";
+       case I40E_ERR_NOT_READY:
+               return "I40E_ERR_NOT_READY";
+       case I40E_NOT_SUPPORTED:
+               return "I40E_NOT_SUPPORTED";
+       case I40E_ERR_FIRMWARE_API_VERSION:
+               return "I40E_ERR_FIRMWARE_API_VERSION";
+       }
+
+       snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+       return hw->err_str;
+}
+
 /**
  * i40e_debug_aq
  * @hw: debug mask related to admin queue
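The two decoders added above exist to make log lines self-describing; call sites later in this diff pair them like so:

	status = i40e_aq_set_phy_config(hw, &config, NULL);
	if (status)
		netdev_info(netdev, "Set phy config failed, err %s aq_err %s\n",
			    i40e_stat_str(hw, status),
			    i40e_aq_str(hw, hw->aq.asq_last_status));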
@@ -1187,9 +1393,9 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
                        blink = false;
 
                if (blink)
-                       gpio_val |= (1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+                       gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
                else
-                       gpio_val &= ~(1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+                       gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
 
                wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
                break;
@@ -2391,7 +2597,7 @@ i40e_aq_erase_nvm_exit:
 #define I40E_DEV_FUNC_CAP_MSIX_VF      0x44
 #define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR        0x45
 #define I40E_DEV_FUNC_CAP_IEEE_1588    0x46
-#define I40E_DEV_FUNC_CAP_MFP_MODE_1   0xF1
+#define I40E_DEV_FUNC_CAP_FLEX10       0xF1
 #define I40E_DEV_FUNC_CAP_CEM          0xF2
 #define I40E_DEV_FUNC_CAP_IWARP                0x51
 #define I40E_DEV_FUNC_CAP_LED          0x61
@@ -2416,6 +2622,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
        u32 valid_functions, num_functions;
        u32 number, logical_id, phys_id;
        struct i40e_hw_capabilities *p;
+       u8 major_rev;
        u32 i = 0;
        u16 id;
 
@@ -2433,6 +2640,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
                number = le32_to_cpu(cap->number);
                logical_id = le32_to_cpu(cap->logical_id);
                phys_id = le32_to_cpu(cap->phys_id);
+               major_rev = cap->major_rev;
 
                switch (id) {
                case I40E_DEV_FUNC_CAP_SWITCH_MODE:
@@ -2507,9 +2715,21 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
                case I40E_DEV_FUNC_CAP_MSIX_VF:
                        p->num_msix_vectors_vf = number;
                        break;
-               case I40E_DEV_FUNC_CAP_MFP_MODE_1:
-                       if (number == 1)
-                               p->mfp_mode_1 = true;
+               case I40E_DEV_FUNC_CAP_FLEX10:
+                       if (major_rev == 1) {
+                               if (number == 1) {
+                                       p->flex10_enable = true;
+                                       p->flex10_capable = true;
+                               }
+                       } else {
+                               /* Capability revision >= 2 */
+                               if (number & 1)
+                                       p->flex10_enable = true;
+                               if (number & 2)
+                                       p->flex10_capable = true;
+                       }
+                       p->flex10_mode = logical_id;
+                       p->flex10_status = phys_id;
                        break;
                case I40E_DEV_FUNC_CAP_CEM:
                        if (number == 1)
@@ -2557,7 +2777,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
        /* Software override ensuring FCoE is disabled if npar or mfp
         * mode because it is not supported in these modes.
         */
-       if (p->npar_enable || p->mfp_mode_1)
+       if (p->npar_enable || p->flex10_enable)
                p->fcoe = false;
 
        /* count the enabled ports (aka the "not disabled" ports) */
index e137e3fac8ee2fd280ffaac6ac5924365e3abbdc..50fc894a4cde3b78fa0b9bbc5edf8564532bbd29 100644
@@ -58,9 +58,9 @@
 #define I40E_IEEE_ETS_MAXTC_SHIFT      0
 #define I40E_IEEE_ETS_MAXTC_MASK       (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
 #define I40E_IEEE_ETS_CBS_SHIFT                6
-#define I40E_IEEE_ETS_CBS_MASK         (0x1 << I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_CBS_MASK         BIT(I40E_IEEE_ETS_CBS_SHIFT)
 #define I40E_IEEE_ETS_WILLING_SHIFT    7
-#define I40E_IEEE_ETS_WILLING_MASK     (0x1 << I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_WILLING_MASK     BIT(I40E_IEEE_ETS_WILLING_SHIFT)
 #define I40E_IEEE_ETS_PRIO_0_SHIFT     0
 #define I40E_IEEE_ETS_PRIO_0_MASK      (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
 #define I40E_IEEE_ETS_PRIO_1_SHIFT     4
@@ -79,9 +79,9 @@
 #define I40E_IEEE_PFC_CAP_SHIFT                0
 #define I40E_IEEE_PFC_CAP_MASK         (0xF << I40E_IEEE_PFC_CAP_SHIFT)
 #define I40E_IEEE_PFC_MBC_SHIFT                6
-#define I40E_IEEE_PFC_MBC_MASK         (0x1 << I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_MBC_MASK         BIT(I40E_IEEE_PFC_MBC_SHIFT)
 #define I40E_IEEE_PFC_WILLING_SHIFT    7
-#define I40E_IEEE_PFC_WILLING_MASK     (0x1 << I40E_IEEE_PFC_WILLING_SHIFT)
+#define I40E_IEEE_PFC_WILLING_MASK     BIT(I40E_IEEE_PFC_WILLING_SHIFT)
 
 /* Defines for IEEE APP TLV */
 #define I40E_IEEE_APP_SEL_SHIFT                0
index bd5079d5c1b682016db7a166c11c9a0e9f392b38..1c51f736a8d0ab54bba24fa8160fddd1f1d93174 100644
@@ -187,7 +187,7 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
        /* Set up all the App TLVs if DCBx is negotiated */
        for (i = 0; i < dcbxcfg->numapps; i++) {
                prio = dcbxcfg->app[i].priority;
-               tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]);
+               tc_map = BIT(dcbxcfg->etscfg.prioritytable[prio]);
 
                /* Add APP only if the TC is enabled for this VSI */
                if (tc_map & vsi->tc_config.enabled_tc) {
index da0faf478af076199e4281b0f3da57ad92c5e62b..d7c15d17faa634c1cb901fc360619e47436dfb39 100644
@@ -964,7 +964,7 @@ static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
                pf->auto_disable_flags |= flag;
        }
        dev_info(&pf->pdev->dev, "requesting a PF reset\n");
-       i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+       i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
 }
 
 #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
@@ -1471,19 +1471,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                }
        } else if (strncmp(cmd_buf, "pfr", 3) == 0) {
                dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
-               i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+               i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
        } else if (strncmp(cmd_buf, "corer", 5) == 0) {
                dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
-               i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
+               i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
 
        } else if (strncmp(cmd_buf, "globr", 5) == 0) {
                dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
-               i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+               i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));
 
        } else if (strncmp(cmd_buf, "empr", 4) == 0) {
                dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n");
-               i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));
+               i40e_do_reset_safe(pf, BIT(__I40E_EMP_RESET_REQUESTED));
 
        } else if (strncmp(cmd_buf, "read", 4) == 0) {
                u32 address;
index 56438bd579e61a24d2f2c2cefefc89eecf2a926a..f141e78d409e5b1a7eeb8e7a304ec9864d2d0386 100644
@@ -144,11 +144,8 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
        ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
        if (!ret_code &&
            ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
-            (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
-               ret_code = i40e_validate_nvm_checksum(hw, NULL);
-       } else {
-               ret_code = I40E_ERR_DIAG_TEST_FAILED;
-       }
-
-       return ret_code;
+            BIT(I40E_SR_CONTROL_WORD_1_SHIFT)))
+               return i40e_validate_nvm_checksum(hw, NULL);
+       else
+               return I40E_ERR_DIAG_TEST_FAILED;
 }
index 9a68c65b17ea03bd00642aab5fe3b2e5a5066765..83d41c2cb02d43fceba54295f05151c12b912b5b 100644
@@ -148,7 +148,9 @@ static struct i40e_stats i40e_gstrings_stats[] = {
        I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
        I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
        I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
+       I40E_PF_STAT("fdir_atr_status", stats.fd_atr_status),
        I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
+       I40E_PF_STAT("fdir_sb_status", stats.fd_sb_status),
 
        /* LPI stats */
        I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
@@ -679,15 +681,17 @@ static int i40e_set_settings(struct net_device *netdev,
                /* make the aq call */
                status = i40e_aq_set_phy_config(hw, &config, NULL);
                if (status) {
-                       netdev_info(netdev, "Set phy config failed with error %d.\n",
-                                   status);
+                       netdev_info(netdev, "Set phy config failed, err %s aq_err %s\n",
+                                   i40e_stat_str(hw, status),
+                                   i40e_aq_str(hw, hw->aq.asq_last_status));
                        return -EAGAIN;
                }
 
                status = i40e_aq_get_link_info(hw, true, NULL, NULL);
                if (status)
-                       netdev_info(netdev, "Updating link info failed with error %d\n",
-                                   status);
+                       netdev_info(netdev, "Updating link info failed with err %s aq_err %s\n",
+                                   i40e_stat_str(hw, status),
+                                   i40e_aq_str(hw, hw->aq.asq_last_status));
 
        } else {
                netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -707,8 +711,9 @@ static int i40e_nway_reset(struct net_device *netdev)
 
        ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
        if (ret) {
-               netdev_info(netdev, "link restart failed, aq_err=%d\n",
-                           pf->hw.aq.asq_last_status);
+               netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
+                           i40e_stat_str(hw, ret),
+                           i40e_aq_str(hw, hw->aq.asq_last_status));
                return -EIO;
        }
 
@@ -820,18 +825,21 @@ static int i40e_set_pauseparam(struct net_device *netdev,
        status = i40e_set_fc(hw, &aq_failures, link_up);
 
        if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
-               netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with error %d and status %d\n",
-                           status, hw->aq.asq_last_status);
+               netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
+                           i40e_stat_str(hw, status),
+                           i40e_aq_str(hw, hw->aq.asq_last_status));
                err = -EAGAIN;
        }
        if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
-               netdev_info(netdev, "Set fc failed on the set_phy_config call with error %d and status %d\n",
-                           status, hw->aq.asq_last_status);
+               netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
+                           i40e_stat_str(hw, status),
+                           i40e_aq_str(hw, hw->aq.asq_last_status));
                err = -EAGAIN;
        }
        if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
-               netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n",
-                           status, hw->aq.asq_last_status);
+               netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
+                           i40e_stat_str(hw, status),
+                           i40e_aq_str(hw, hw->aq.asq_last_status));
                err = -EAGAIN;
        }
 
@@ -1009,7 +1017,7 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
                & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
                >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
        /* register returns value in power of 2, 64Kbyte chunks. */
-       val = (64 * 1024) * (1 << val);
+       val = (64 * 1024) * BIT(val);
        return val;
 }
 
@@ -1462,20 +1470,11 @@ static int i40e_get_ts_info(struct net_device *dev,
        else
                info->phc_index = -1;
 
-       info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
-
-       info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
-                          (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-                          (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+       info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+       info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+                          BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
 
        return 0;
 }
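The rewritten rx_filters advertises only the coarse V1/V2 event classes rather than every individual PTP message type. Userspace can still issue a fine-grained request, since the timestamping ABI lets a driver report back the broader filter it actually programmed. A sketch of such a request:

	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC,	/* requested */
	};
	/* After SIOCSHWTSTAMP the driver may hand back
	 * HWTSTAMP_FILTER_PTP_V2_EVENT as the filter in effect. */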
@@ -1591,7 +1590,7 @@ static void i40e_diag_test(struct net_device *netdev,
                        /* indicate we're in test mode */
                        dev_close(netdev);
                else
-                       i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+                       i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
                /* Link test performed before hardware reset
                 * so autoneg doesn't interfere with test result
@@ -1613,7 +1612,7 @@ static void i40e_diag_test(struct net_device *netdev,
                        eth_test->flags |= ETH_TEST_FL_FAILED;
 
                clear_bit(__I40E_TESTING, &pf->state);
-               i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+               i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
                if (if_running)
                        dev_open(netdev);
@@ -1646,7 +1645,7 @@ static void i40e_get_wol(struct net_device *netdev,
 
        /* NVM bit on means WoL disabled for the port */
        i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
-       if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) {
+       if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1)) {
                wol->supported = 0;
                wol->wolopts = 0;
        } else {
@@ -1679,7 +1678,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 
        /* NVM bit on means WoL disabled for the port */
        i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
-       if (((1 << hw->port) & wol_nvm_bits))
+       if (BIT(hw->port) & wol_nvm_bits)
                return -EOPNOTSUPP;
 
        /* only magic packet is supported */
@@ -2025,10 +2024,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        case TCP_V4_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+                       hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+                       hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
                        break;
                default:
                        return -EINVAL;
@@ -2037,10 +2036,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        case TCP_V6_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+                       hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+                       hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
                        break;
                default:
                        return -EINVAL;
@@ -2049,12 +2048,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        case UDP_V4_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-                                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+                       hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-                                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+                       hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+                                BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
                        break;
                default:
                        return -EINVAL;
@@ -2063,12 +2062,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        case UDP_V6_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-                                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+                       hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-                                ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+                       hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+                                BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
                        break;
                default:
                        return -EINVAL;
@@ -2081,7 +2080,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
                if ((nfc->data & RXH_L4_B_0_1) ||
                    (nfc->data & RXH_L4_B_2_3))
                        return -EINVAL;
-               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+               hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
                break;
        case AH_ESP_V6_FLOW:
        case AH_V6_FLOW:
@@ -2090,15 +2089,15 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
                if ((nfc->data & RXH_L4_B_0_1) ||
                    (nfc->data & RXH_L4_B_2_3))
                        return -EINVAL;
-               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+               hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
                break;
        case IPV4_FLOW:
-               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
-                       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+               hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+                       BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
                break;
        case IPV6_FLOW:
-               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
-                       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+               hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+                       BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
                break;
        default:
                return -EINVAL;
index c8b621e0e7cda622c5a0fa9e795a898e53886cf5..5ea75dd537d62f6e9da5545af1f597071b4aa6bd 100644
@@ -298,8 +298,8 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
 
        /* enable FCoE hash filter */
        val = rd32(hw, I40E_PFQF_HENA(1));
-       val |= 1 << (I40E_FILTER_PCTYPE_FCOE_OX - 32);
-       val |= 1 << (I40E_FILTER_PCTYPE_FCOE_RX - 32);
+       val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32);
+       val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32);
        val &= I40E_PFQF_HENA_PTYPE_ENA_MASK;
        wr32(hw, I40E_PFQF_HENA(1), val);
 
@@ -308,10 +308,10 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
        pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
 
        /* Reserve 4K DDP contexts and 20K filter size for FCoE */
-       pf->fcoe_hmc_cntx_num = (1 << I40E_DMA_CNTX_SIZE_4K) *
-                                I40E_DMA_CNTX_BASE_SIZE;
+       pf->fcoe_hmc_cntx_num = BIT(I40E_DMA_CNTX_SIZE_4K) *
+                               I40E_DMA_CNTX_BASE_SIZE;
        pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num +
-                               (1 << I40E_HASH_FILTER_SIZE_16K) *
+                               BIT(I40E_HASH_FILTER_SIZE_16K) *
                                I40E_HASH_FILTER_BASE_SIZE;
 
        /* FCoE object: max 16K filter buckets and 4K DMA contexts */
@@ -348,7 +348,7 @@ u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf)
                if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
                    app.protocolid == ETH_P_FCOE) {
                        tc = dcbcfg->etscfg.prioritytable[app.priority];
-                       enabled_tc |= (1 << tc);
+                       enabled_tc |= BIT(tc);
                        break;
                }
        }
index 0d49e2d15d408c671c3acf581b10df5763fee7c3..a93174ddeaba747aa6a9576b53c8700ef3202dc2 100644
@@ -59,9 +59,9 @@
        (((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1)
 
 #define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT   \
-       (1 << I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
+       BIT(I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
 #define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT   \
-       (1 << I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
+       BIT(I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
 
 #define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e)    \
        I40E_RX_PROG_FCOE_ERROR_CONFLICT(e)
index 9b987ccc9e828738caef1c4811c5db1ca33b388d..5ebe12d56ebf422b6273d0e810be8bbf113ced04 100644
@@ -116,6 +116,7 @@ exit:
  * @hw: pointer to our HW structure
  * @hmc_info: pointer to the HMC configuration information structure
  * @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
  *
  * This function:
  *     1. Initializes the pd entry
@@ -129,12 +130,14 @@ exit:
  **/
 i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
                                              struct i40e_hmc_info *hmc_info,
-                                             u32 pd_index)
+                                             u32 pd_index,
+                                             struct i40e_dma_mem *rsrc_pg)
 {
        i40e_status ret_code = 0;
        struct i40e_hmc_pd_table *pd_table;
        struct i40e_hmc_pd_entry *pd_entry;
        struct i40e_dma_mem mem;
+       struct i40e_dma_mem *page = &mem;
        u32 sd_idx, rel_pd_idx;
        u64 *pd_addr;
        u64 page_desc;
@@ -155,18 +158,24 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
        pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
        pd_entry = &pd_table->pd_entry[rel_pd_idx];
        if (!pd_entry->valid) {
-               /* allocate a 4K backing page */
-               ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp,
-                                                I40E_HMC_PAGED_BP_SIZE,
-                                                I40E_HMC_PD_BP_BUF_ALIGNMENT);
-               if (ret_code)
-                       goto exit;
+               if (rsrc_pg) {
+                       pd_entry->rsrc_pg = true;
+                       page = rsrc_pg;
+               } else {
+                       /* allocate a 4K backing page */
+                       ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
+                                               I40E_HMC_PAGED_BP_SIZE,
+                                               I40E_HMC_PD_BP_BUF_ALIGNMENT);
+                       if (ret_code)
+                               goto exit;
+                       pd_entry->rsrc_pg = false;
+               }
 
-               pd_entry->bp.addr = mem;
+               pd_entry->bp.addr = *page;
                pd_entry->bp.sd_pd_index = pd_index;
                pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
                /* Set page address and valid bit */
-               page_desc = mem.pa | 0x1;
+               page_desc = page->pa | 0x1;
 
                pd_addr = (u64 *)pd_table->pd_page_addr.va;
                pd_addr += rel_pd_idx;
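Existing callers keep the allocate-per-entry behavior by passing NULL for rsrc_pg, as the LAN HMC hunk further down does; a consumer supplying its own backing page would look roughly like this sketch (prealloc_pg is hypothetical):

	/* Reuse one preallocated 4K page for this PD entry... */
	ret = i40e_add_pd_table_entry(hw, hmc_info, pd_index, prealloc_pg);

	/* ...or keep the legacy behavior and let the function allocate. */
	ret = i40e_add_pd_table_entry(hw, hmc_info, pd_index, NULL);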
@@ -240,7 +249,8 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
        I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
 
        /* free memory here */
-       ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+       if (!pd_entry->rsrc_pg)
+               ret_code = i40e_free_dma_mem(hw, &pd_entry->bp.addr);
        if (ret_code)
                goto exit;
        if (!pd_table->ref_cnt)
@@ -287,21 +297,15 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
                                            u32 idx, bool is_pf)
 {
        struct i40e_hmc_sd_entry *sd_entry;
-       i40e_status ret_code = 0;
+
+       if (!is_pf)
+               return I40E_NOT_SUPPORTED;
 
        /* get the entry and decrease its ref counter */
        sd_entry = &hmc_info->sd_table.sd_entry[idx];
-       if (is_pf) {
-               I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
-       } else {
-               ret_code = I40E_NOT_SUPPORTED;
-               goto exit;
-       }
-       ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
-       if (ret_code)
-               goto exit;
-exit:
-       return ret_code;
+       I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+
+       return i40e_free_dma_mem(hw, &sd_entry->u.bp.addr);
 }
 
 /**
@@ -341,20 +345,13 @@ i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
                                              struct i40e_hmc_info *hmc_info,
                                              u32 idx, bool is_pf)
 {
-       i40e_status ret_code = 0;
        struct i40e_hmc_sd_entry *sd_entry;
 
+       if (!is_pf)
+               return I40E_NOT_SUPPORTED;
+
        sd_entry = &hmc_info->sd_table.sd_entry[idx];
-       if (is_pf) {
-               I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
-       } else {
-               ret_code = I40E_NOT_SUPPORTED;
-               goto exit;
-       }
-       /* free memory here */
-       ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
-       if (ret_code)
-               goto exit;
-exit:
-       return ret_code;
+       I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+
+       return  i40e_free_dma_mem(hw, &sd_entry->u.pd_table.pd_page_addr);
 }
index 732a02660330664ad7e65a2a78195a28bd4339fd..d906692113929e412df7806fd8795922586c9f78 100644
@@ -62,6 +62,7 @@ struct i40e_hmc_bp {
 struct i40e_hmc_pd_entry {
        struct i40e_hmc_bp bp;
        u32 sd_index;
+       bool rsrc_pg;
        bool valid;
 };
 
@@ -126,8 +127,8 @@ struct i40e_hmc_info {
                 I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |              \
                ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<            \
                I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |                  \
-               (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);            \
-       val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);      \
+               BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);              \
+       val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);     \
        wr32((hw), I40E_PFHMC_SDDATAHIGH, val1);                        \
        wr32((hw), I40E_PFHMC_SDDATALOW, val2);                         \
        wr32((hw), I40E_PFHMC_SDCMD, val3);                             \
@@ -146,7 +147,7 @@ struct i40e_hmc_info {
                I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |               \
                ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<            \
                I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);                   \
-       val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);      \
+       val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);     \
        wr32((hw), I40E_PFHMC_SDDATAHIGH, 0);                           \
        wr32((hw), I40E_PFHMC_SDDATALOW, val2);                         \
        wr32((hw), I40E_PFHMC_SDCMD, val3);                             \
@@ -218,7 +219,8 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
 
 i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
                                              struct i40e_hmc_info *hmc_info,
-                                             u32 pd_index);
+                                             u32 pd_index,
+                                             struct i40e_dma_mem *rsrc_pg);
 i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
                                        struct i40e_hmc_info *hmc_info,
                                        u32 idx);
index 0079ad7bcd0e1ff9c5fb985322a72d635418df14..fa371a2a40c6817e6f9f1dd8ff8b924ff3b08153 100644
@@ -129,7 +129,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
        obj->cnt = txq_num;
        obj->base = 0;
        size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
-       obj->size = (u64)1 << size_exp;
+       obj->size = BIT_ULL(size_exp);
 
        /* validate values requested by driver don't exceed HMC capacity */
        if (txq_num > obj->max_cnt) {
@@ -152,7 +152,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
                     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
        obj->base = i40e_align_l2obj_base(obj->base);
        size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
-       obj->size = (u64)1 << size_exp;
+       obj->size = BIT_ULL(size_exp);
 
        /* validate values requested by driver don't exceed HMC capacity */
        if (rxq_num > obj->max_cnt) {
@@ -175,7 +175,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
                     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
        obj->base = i40e_align_l2obj_base(obj->base);
        size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
-       obj->size = (u64)1 << size_exp;
+       obj->size = BIT_ULL(size_exp);
 
        /* validate values requested by driver don't exceed HMC capacity */
        if (fcoe_cntx_num > obj->max_cnt) {
@@ -198,7 +198,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
                     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
        obj->base = i40e_align_l2obj_base(obj->base);
        size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
-       obj->size = (u64)1 << size_exp;
+       obj->size = BIT_ULL(size_exp);
 
        /* validate values requested by driver don't exceed HMC capacity */
        if (fcoe_filt_num > obj->max_cnt) {
@@ -387,7 +387,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
                                /* update the pd table entry */
                                ret_code = i40e_add_pd_table_entry(hw,
                                                                info->hmc_info,
-                                                               i);
+                                                               i, NULL);
                                if (ret_code) {
                                        pd_error = true;
                                        break;
@@ -763,7 +763,7 @@ static void i40e_write_byte(u8 *hmc_bits,
 
        /* prepare the bits and mask */
        shift_width = ce_info->lsb % 8;
-       mask = ((u8)1 << ce_info->width) - 1;
+       mask = BIT(ce_info->width) - 1;
 
        src_byte = *from;
        src_byte &= mask;
@@ -804,7 +804,7 @@ static void i40e_write_word(u8 *hmc_bits,
 
        /* prepare the bits and mask */
        shift_width = ce_info->lsb % 8;
-       mask = ((u16)1 << ce_info->width) - 1;
+       mask = BIT(ce_info->width) - 1;
 
        /* don't swizzle the bits until after the mask because the mask bits
         * will be in a different bit position on big endian machines
@@ -854,7 +854,7 @@ static void i40e_write_dword(u8 *hmc_bits,
         * to 5 bits so the shift will do nothing
         */
        if (ce_info->width < 32)
-               mask = ((u32)1 << ce_info->width) - 1;
+               mask = BIT(ce_info->width) - 1;
        else
                mask = ~(u32)0;
 
@@ -906,7 +906,7 @@ static void i40e_write_qword(u8 *hmc_bits,
         * to 6 bits so the shift will do nothing
         */
        if (ce_info->width < 64)
-               mask = ((u64)1 << ce_info->width) - 1;
+               mask = BIT_ULL(ce_info->width) - 1;
        else
                mask = ~(u64)0;
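The converted mask computations build an n-bit field mask; the width == 64 special case survives because a 64-bit shift by 64 is undefined. Distilled into a sketch:

	/* BIT_ULL(w) - 1 sets the w low bits, e.g. w = 5 gives 0x1f. */
	static inline u64 field_mask(u8 width)
	{
		return (width < 64) ? BIT_ULL(width) - 1 : ~(u64)0;
	}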
 
index 48a52b35b61427c436b37ed070e998c7483150fb..857d294d2a453c3097a3ead5c7a5fa27df001ae4 100644
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 4
+#define DRV_VERSION_BUILD 6
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -520,7 +520,7 @@ static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
        if (likely(new_data >= *offset))
                *stat = new_data - *offset;
        else
-               *stat = (new_data + ((u64)1 << 48)) - *offset;
+               *stat = (new_data + BIT_ULL(48)) - *offset;
        *stat &= 0xFFFFFFFFFFFFULL;
 }
 
@@ -543,7 +543,7 @@ static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
        if (likely(new_data >= *offset))
                *stat = (u32)(new_data - *offset);
        else
-               *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
+               *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
 }
 
 /**
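The BIT_ULL() terms above implement wrap correction for hardware counters narrower than 64 bits; a worked example for the 48-bit case:

	u64 offset   = 0xFFFFFFFFFFF0ULL;	/* snapshot taken before the wrap */
	u64 new_data = 0x000000000010ULL;	/* reading taken after the wrap   */
	u64 stat     = (new_data + BIT_ULL(48)) - offset;
	stat &= 0xFFFFFFFFFFFFULL;		/* stat == 0x20: 0x10 up to the
						 * wrap point plus 0x10 past it */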
@@ -1123,6 +1123,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
                           pf->stat_offsets_loaded,
                           &osd->rx_lpi_count, &nsd->rx_lpi_count);
 
+       if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
+           !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
+               nsd->fd_sb_status = true;
+       else
+               nsd->fd_sb_status = false;
+
+       if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
+           !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+               nsd->fd_atr_status = true;
+       else
+               nsd->fd_atr_status = false;
+
        pf->stat_offsets_loaded = true;
 }
 
@@ -1264,7 +1276,7 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
 {
        struct i40e_aqc_remove_macvlan_element_data element;
        struct i40e_pf *pf = vsi->back;
-       i40e_status aq_ret;
+       i40e_status ret;
 
        /* Only appropriate for the PF main VSI */
        if (vsi->type != I40E_VSI_MAIN)
@@ -1275,8 +1287,8 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
        element.vlan_tag = 0;
        element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
                        I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
-       aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
-       if (aq_ret)
+       ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+       if (ret)
                return -ENOENT;
 
        return 0;
@@ -1514,7 +1526,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
        if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
                /* Find numtc from enabled TC bitmap */
                for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-                       if (enabled_tc & (1 << i)) /* TC is enabled */
+                       if (enabled_tc & BIT_ULL(i)) /* TC is enabled */
                                numtc++;
                }
                if (!numtc) {
@@ -1540,7 +1552,8 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
        /* Setup queue offset/count for all TCs for given VSI */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                /* See if the given TC is enabled for the given VSI */
-               if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
+               if (vsi->tc_config.enabled_tc & BIT_ULL(i)) {
+                       /* TC is enabled */
                        int pow, num_qps;
 
                        switch (vsi->type) {
@@ -1566,7 +1579,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                        /* find the next higher power-of-2 of num queue pairs */
                        num_qps = qcount;
                        pow = 0;
-                       while (num_qps && ((1 << pow) < qcount)) {
+                       while (num_qps && (BIT_ULL(pow) < qcount)) {
                                pow++;
                                num_qps >>= 1;
                        }
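
The loop rounds a TC's queue count up to a power of two, which is the form the queue map expects. A trace for qcount = 5:

        /* pow=0: BIT_ULL(0)=1 < 5  -> pow=1, num_qps 5 -> 2
         * pow=1: BIT_ULL(1)=2 < 5  -> pow=2, num_qps 2 -> 1
         * pow=2: BIT_ULL(2)=4 < 5  -> pow=3, num_qps 1 -> 0
         * loop exits (num_qps == 0); 1 << 3 = 8, the next power of
         * two at or above qcount.
         */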
@@ -1716,10 +1729,11 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
        bool add_happened = false;
        int filter_list_len = 0;
        u32 changed_flags = 0;
-       i40e_status aq_ret = 0;
+       i40e_status ret = 0;
        struct i40e_pf *pf;
        int num_add = 0;
        int num_del = 0;
+       int aq_err = 0;
        u16 cmd_flags;
 
        /* empty array typed pointers, kcalloc later */
@@ -1771,31 +1785,31 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 
                        /* flush a full buffer */
                        if (num_del == filter_list_len) {
-                               aq_ret = i40e_aq_remove_macvlan(&pf->hw,
-                                           vsi->seid, del_list, num_del,
-                                           NULL);
+                               ret = i40e_aq_remove_macvlan(&pf->hw,
+                                                 vsi->seid, del_list, num_del,
+                                                 NULL);
+                               aq_err = pf->hw.aq.asq_last_status;
                                num_del = 0;
                                memset(del_list, 0, sizeof(*del_list));
 
-                               if (aq_ret &&
-                                   pf->hw.aq.asq_last_status !=
-                                                             I40E_AQ_RC_ENOENT)
+                               if (ret && aq_err != I40E_AQ_RC_ENOENT)
                                        dev_info(&pf->pdev->dev,
-                                                "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
-                                                aq_ret,
-                                                pf->hw.aq.asq_last_status);
+                                                "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
+                                                i40e_stat_str(&pf->hw, ret),
+                                                i40e_aq_str(&pf->hw, aq_err));
                        }
                }
                if (num_del) {
-                       aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
+                       ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
                                                     del_list, num_del, NULL);
+                       aq_err = pf->hw.aq.asq_last_status;
                        num_del = 0;
 
-                       if (aq_ret &&
-                           pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
+                       if (ret && aq_err != I40E_AQ_RC_ENOENT)
                                dev_info(&pf->pdev->dev,
-                                        "ignoring delete macvlan error, err %d, aq_err %d\n",
-                                        aq_ret, pf->hw.aq.asq_last_status);
+                                        "ignoring delete macvlan error, err %s aq_err %s\n",
+                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_aq_str(&pf->hw, aq_err));
                }
 
                kfree(del_list);
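
The recurring change in this file swaps raw numeric err/aq_err values in dev_info() for decoded strings from i40e_stat_str() and i40e_aq_str(), whose prototypes appear further down in this diff. The decoder bodies live in i40e_common.c and are not part of the hunks shown; a hypothetical sketch of the shape of such a helper:

        /* hypothetical sketch only -- the real versions live in
         * i40e_common.c, outside these hunks
         */
        static const char *example_aq_str(enum i40e_admin_queue_err aq_err)
        {
                switch (aq_err) {
                case I40E_AQ_RC_OK:     return "OK";
                case I40E_AQ_RC_ENOENT: return "I40E_AQ_RC_ENOENT";
                case I40E_AQ_RC_EINVAL: return "I40E_AQ_RC_EINVAL";
                default:                return "I40E_AQ_RC_UNKNOWN";
                }
        }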
@@ -1833,29 +1847,31 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 
                        /* flush a full buffer */
                        if (num_add == filter_list_len) {
-                               aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
-                                                            add_list, num_add,
-                                                            NULL);
+                               ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+                                                         add_list, num_add,
+                                                         NULL);
+                               aq_err = pf->hw.aq.asq_last_status;
                                num_add = 0;
 
-                               if (aq_ret)
+                               if (ret)
                                        break;
                                memset(add_list, 0, sizeof(*add_list));
                        }
                }
                if (num_add) {
-                       aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
-                                                    add_list, num_add, NULL);
+                       ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+                                                 add_list, num_add, NULL);
+                       aq_err = pf->hw.aq.asq_last_status;
                        num_add = 0;
                }
                kfree(add_list);
                add_list = NULL;
 
-               if (add_happened && aq_ret &&
-                   pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) {
+               if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) {
                        dev_info(&pf->pdev->dev,
-                                "add filter failed, err %d, aq_err %d\n",
-                                aq_ret, pf->hw.aq.asq_last_status);
+                                "add filter failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw, aq_err));
                        if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
                            !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
                                      &vsi->state)) {
@@ -1871,34 +1887,40 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
        if (changed_flags & IFF_ALLMULTI) {
                bool cur_multipromisc;
                cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
-               aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
-                                                              vsi->seid,
-                                                              cur_multipromisc,
-                                                              NULL);
-               if (aq_ret)
+               ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
+                                                           vsi->seid,
+                                                           cur_multipromisc,
+                                                           NULL);
+               if (ret)
                        dev_info(&pf->pdev->dev,
-                                "set multi promisc failed, err %d, aq_err %d\n",
-                                aq_ret, pf->hw.aq.asq_last_status);
+                                "set multi promisc failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
        }
        if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
                bool cur_promisc;
                cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
                               test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
                                        &vsi->state));
-               aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
-                                                            vsi->seid,
-                                                            cur_promisc, NULL);
-               if (aq_ret)
+               ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
+                                                         vsi->seid,
+                                                         cur_promisc, NULL);
+               if (ret)
                        dev_info(&pf->pdev->dev,
-                                "set uni promisc failed, err %d, aq_err %d\n",
-                                aq_ret, pf->hw.aq.asq_last_status);
-               aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
-                                                  vsi->seid,
-                                                  cur_promisc, NULL);
-               if (aq_ret)
+                                "set uni promisc failed, err %s, aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
+               ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
+                                               vsi->seid,
+                                               cur_promisc, NULL);
+               if (ret)
                        dev_info(&pf->pdev->dev,
-                                "set brdcast promisc failed, err %d, aq_err %d\n",
-                                aq_ret, pf->hw.aq.asq_last_status);
+                                "set brdcast promisc failed, err %s, aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
        }
 
        clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -1994,8 +2016,10 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
        ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
-                        "%s: update vsi failed, aq_err=%d\n",
-                        __func__, vsi->back->hw.aq.asq_last_status);
+                        "update vlan stripping failed, err %s aq_err %s\n",
+                        i40e_stat_str(&vsi->back->hw, ret),
+                        i40e_aq_str(&vsi->back->hw,
+                                    vsi->back->hw.aq.asq_last_status));
        }
 }
 
@@ -2023,8 +2047,10 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
        ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
-                        "%s: update vsi failed, aq_err=%d\n",
-                        __func__, vsi->back->hw.aq.asq_last_status);
+                        "update vlan stripping failed, err %s aq_err %s\n",
+                        i40e_stat_str(&vsi->back->hw, ret),
+                        i40e_aq_str(&vsi->back->hw,
+                                    vsi->back->hw.aq.asq_last_status));
        }
 }
 
@@ -2294,7 +2320,7 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
 {
        struct i40e_vsi_context ctxt;
-       i40e_status aq_ret;
+       i40e_status ret;
 
        vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
        vsi->info.pvid = cpu_to_le16(vid);
@@ -2304,11 +2330,13 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
 
        ctxt.seid = vsi->seid;
        ctxt.info = vsi->info;
-       aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-       if (aq_ret) {
+       ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+       if (ret) {
                dev_info(&vsi->back->pdev->dev,
-                        "%s: update vsi failed, aq_err=%d\n",
-                        __func__, vsi->back->hw.aq.asq_last_status);
+                        "add pvid failed, err %s aq_err %s\n",
+                        i40e_stat_str(&vsi->back->hw, ret),
+                        i40e_aq_str(&vsi->back->hw,
+                                    vsi->back->hw.aq.asq_last_status));
                return -ENOENT;
        }
 
@@ -2696,9 +2724,9 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
 #endif /* I40E_FCOE */
        /* round up for the chip's needs */
        vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
-                               (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
+                               BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
        vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
-                               (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+                               BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
 
        /* set up individual rings */
        for (i = 0; i < vsi->num_queue_pairs && !err; i++)
@@ -2728,7 +2756,7 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
        }
 
        for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
-               if (!(vsi->tc_config.enabled_tc & (1 << n)))
+               if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
                        continue;
 
                qoffset = vsi->tc_config.tc_info[n].qoffset;
@@ -4073,7 +4101,7 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
                if (app.selector == I40E_APP_SEL_TCPIP &&
                    app.protocolid == I40E_APP_PROTOID_ISCSI) {
                        tc = dcbcfg->etscfg.prioritytable[app.priority];
-                       enabled_tc |= (1 << tc);
+                       enabled_tc |= BIT_ULL(tc);
                        break;
                }
        }
@@ -4122,7 +4150,7 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
        u8 i;
 
        for (i = 0; i < num_tc; i++)
-               enabled_tc |= 1 << i;
+               enabled_tc |= BIT(i);
 
        return enabled_tc;
 }
@@ -4157,7 +4185,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
        /* At least have TC0 */
        enabled_tc = (enabled_tc ? enabled_tc : 0x1);
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-               if (enabled_tc & (1 << i))
+               if (enabled_tc & BIT_ULL(i))
                        num_tc++;
        }
        return num_tc;
@@ -4179,11 +4207,11 @@ static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
 
        /* Find the first enabled TC */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-               if (enabled_tc & (1 << i))
+               if (enabled_tc & BIT_ULL(i))
                        break;
        }
 
-       return 1 << i;
+       return BIT(i);
 }
 
 /**
@@ -4221,26 +4249,28 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
        struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
-       i40e_status aq_ret;
+       i40e_status ret;
        u32 tc_bw_max;
        int i;
 
        /* Get the VSI level BW configuration */
-       aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
-       if (aq_ret) {
+       ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+       if (ret) {
                dev_info(&pf->pdev->dev,
-                        "couldn't get PF vsi bw config, err %d, aq_err %d\n",
-                        aq_ret, pf->hw.aq.asq_last_status);
+                        "couldn't get PF vsi bw config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                return -EINVAL;
        }
 
        /* Get the VSI level BW configuration per TC */
-       aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
-                                                 NULL);
-       if (aq_ret) {
+       ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
+                                              NULL);
+       if (ret) {
                dev_info(&pf->pdev->dev,
-                        "couldn't get PF vsi ets bw config, err %d, aq_err %d\n",
-                        aq_ret, pf->hw.aq.asq_last_status);
+                        "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                return -EINVAL;
        }
 
@@ -4279,16 +4309,16 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
                                       u8 *bw_share)
 {
        struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
-       i40e_status aq_ret;
+       i40e_status ret;
        int i;
 
        bw_data.tc_valid_bits = enabled_tc;
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
                bw_data.tc_bw_credits[i] = bw_share[i];
 
-       aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
-                                         NULL);
-       if (aq_ret) {
+       ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
+                                      NULL);
+       if (ret) {
                dev_info(&vsi->back->pdev->dev,
                         "AQ command Config VSI BW allocation per TC failed = %d\n",
                         vsi->back->hw.aq.asq_last_status);
@@ -4337,7 +4367,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
                 * will set the numtc for netdev as 2 that will be
                 * referenced by the netdev layer as TC 0 and 1.
                 */
-               if (vsi->tc_config.enabled_tc & (1 << i))
+               if (vsi->tc_config.enabled_tc & BIT_ULL(i))
                        netdev_set_tc_queue(netdev,
                                        vsi->tc_config.tc_info[i].netdev_tc,
                                        vsi->tc_config.tc_info[i].qcount,
@@ -4399,7 +4429,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
 
        /* Enable ETS TCs with equal BW Share for now across all VSIs */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-               if (enabled_tc & (1 << i))
+               if (enabled_tc & BIT_ULL(i))
                        bw_share[i] = 1;
        }
 
@@ -4423,8 +4453,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
        ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
-                        "update vsi failed, aq_err=%d\n",
-                        vsi->back->hw.aq.asq_last_status);
+                        "Update vsi tc config failed, err %s aq_err %s\n",
+                        i40e_stat_str(&vsi->back->hw, ret),
+                        i40e_aq_str(&vsi->back->hw,
+                                    vsi->back->hw.aq.asq_last_status));
                goto out;
        }
        /* update the local VSI info with updated queue map */
@@ -4435,8 +4467,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
        ret = i40e_vsi_get_bw_info(vsi);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
-                        "Failed updating vsi bw info, aq_err=%d\n",
-                        vsi->back->hw.aq.asq_last_status);
+                        "Failed updating vsi bw info, err %s aq_err %s\n",
+                        i40e_stat_str(&vsi->back->hw, ret),
+                        i40e_aq_str(&vsi->back->hw,
+                                    vsi->back->hw.aq.asq_last_status));
                goto out;
        }
 
@@ -4469,7 +4503,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
 
        /* Enable ETS TCs with equal BW Share for now */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-               if (enabled_tc & (1 << i))
+               if (enabled_tc & BIT_ULL(i))
                        bw_data.tc_bw_share_credits[i] = 1;
        }
 
@@ -4477,8 +4511,9 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
                                                   &bw_data, NULL);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "veb bw config failed, aq_err=%d\n",
-                        pf->hw.aq.asq_last_status);
+                        "VEB bw config failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                goto out;
        }
 
@@ -4486,8 +4521,9 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
        ret = i40e_veb_get_bw_info(veb);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "Failed getting veb bw config, aq_err=%d\n",
-                        pf->hw.aq.asq_last_status);
+                        "Failed getting veb bw config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
        }
 
 out:
@@ -4574,8 +4610,9 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
        ret = i40e_aq_resume_port_tx(hw, NULL);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "AQ command Resume Port Tx failed = %d\n",
-                         pf->hw.aq.asq_last_status);
+                        "Resume Port Tx failed, err %s aq_err %s\n",
+                         i40e_stat_str(&pf->hw, ret),
+                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                /* Schedule PF reset to recover */
                set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
                i40e_service_event_schedule(pf);
@@ -4627,8 +4664,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
                }
        } else {
                dev_info(&pf->pdev->dev,
-                        "AQ Querying DCB configuration failed: aq_err %d\n",
-                        pf->hw.aq.asq_last_status);
+                        "Query for DCB configuration failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, err),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
        }
 
 out:
@@ -4859,7 +4897,7 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc)
 
        /* Generate TC map for number of tc requested */
        for (i = 0; i < tc; i++)
-               enabled_tc |= (1 << i);
+               enabled_tc |= BIT_ULL(i);
 
        /* Requesting same TC configuration as already enabled */
        if (enabled_tc == vsi->tc_config.enabled_tc)
@@ -4998,7 +5036,7 @@ err_setup_rx:
 err_setup_tx:
        i40e_vsi_free_tx_resources(vsi);
        if (vsi == pf->vsi[pf->lan_vsi])
-               i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+               i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
 
        return err;
 }
@@ -5066,7 +5104,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                i40e_vc_notify_reset(pf);
 
        /* do the biggest reset indicated */
-       if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
+       if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
 
                /* Request a Global Reset
                 *
@@ -5081,7 +5119,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
                wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
 
-       } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
+       } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
 
                /* Request a Core Reset
                 *
@@ -5093,7 +5131,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
                i40e_flush(&pf->hw);
 
-       } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
+       } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
 
                /* Request a PF Reset
                 *
@@ -5106,7 +5144,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                dev_dbg(&pf->pdev->dev, "PFR requested\n");
                i40e_handle_reset_warning(pf);
 
-       } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
+       } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
                int v;
 
                /* Find the VSI(s) that requested a re-init */
@@ -5123,7 +5161,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
 
                /* no further action needed, so return now */
                return;
-       } else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) {
+       } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
                int v;
 
                /* Find the VSI(s) that needs to be brought down */
@@ -5253,7 +5291,10 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
        /* Get updated DCBX data from firmware */
        ret = i40e_get_dcb_config(&pf->hw);
        if (ret) {
-               dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n");
+               dev_info(&pf->pdev->dev,
+                        "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                goto exit;
        }
 
@@ -5761,23 +5802,23 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
 
        rtnl_lock();
        if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
-               reset_flags |= (1 << __I40E_REINIT_REQUESTED);
+               reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED);
                clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
-               reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
+               reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED);
                clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
-               reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
+               reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED);
                clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
-               reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
+               reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED);
                clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
-               reset_flags |= (1 << __I40E_DOWN_REQUESTED);
+               reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED);
                clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
        }
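
Each requested reset type is folded into its own reset_flags bit and the matching pf->state bit is cleared; i40e_do_reset(), shown earlier in this file, then services only the most severe request:

        /* severity order in i40e_do_reset():
         *   GLOBAL > CORE > PF > REINIT > DOWN
         * so a pending global reset subsumes any lesser request
         * collected here.
         */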
 
@@ -5983,27 +6024,29 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
 {
        struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
        struct i40e_vsi_context ctxt;
-       int aq_ret;
+       int ret;
 
        ctxt.seid = pf->main_vsi_seid;
        ctxt.pf_num = pf->hw.pf_id;
        ctxt.vf_num = 0;
-       aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
-       if (aq_ret) {
+       ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+       if (ret) {
                dev_info(&pf->pdev->dev,
-                        "%s couldn't get PF vsi config, err %d, aq_err %d\n",
-                        __func__, aq_ret, pf->hw.aq.asq_last_status);
+                        "couldn't get PF vsi config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                return;
        }
        ctxt.flags = I40E_AQ_VSI_TYPE_PF;
        ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
        ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 
-       aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-       if (aq_ret) {
+       ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+       if (ret) {
                dev_info(&pf->pdev->dev,
-                        "%s: update vsi switch failed, aq_err=%d\n",
-                        __func__, vsi->back->hw.aq.asq_last_status);
+                        "update vsi switch failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
        }
 }
 
@@ -6017,27 +6060,29 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
 {
        struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
        struct i40e_vsi_context ctxt;
-       int aq_ret;
+       int ret;
 
        ctxt.seid = pf->main_vsi_seid;
        ctxt.pf_num = pf->hw.pf_id;
        ctxt.vf_num = 0;
-       aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
-       if (aq_ret) {
+       ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+       if (ret) {
                dev_info(&pf->pdev->dev,
-                        "%s couldn't get PF vsi config, err %d, aq_err %d\n",
-                        __func__, aq_ret, pf->hw.aq.asq_last_status);
+                        "couldn't get PF vsi config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                return;
        }
        ctxt.flags = I40E_AQ_VSI_TYPE_PF;
        ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
        ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 
-       aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-       if (aq_ret) {
+       ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+       if (ret) {
                dev_info(&pf->pdev->dev,
-                        "%s: update vsi switch failed, aq_err=%d\n",
-                        __func__, vsi->back->hw.aq.asq_last_status);
+                        "update vsi switch failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
        }
 }
 
@@ -6097,7 +6142,8 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
        ret = i40e_add_vsi(ctl_vsi);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "rebuild of owner VSI failed: %d\n", ret);
+                        "rebuild of veb_idx %d owner VSI failed: %d\n",
+                        veb->idx, ret);
                goto end_reconstitute;
        }
        i40e_vsi_reset_stats(ctl_vsi);
@@ -6176,8 +6222,10 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
                        buf_len = data_size;
                } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
                        dev_info(&pf->pdev->dev,
-                                "capability discovery failed: aq=%d\n",
-                                pf->hw.aq.asq_last_status);
+                                "capability discovery failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, err),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
                        return -ENODEV;
                }
        } while (err);
@@ -6363,7 +6411,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
        /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
        ret = i40e_init_adminq(&pf->hw);
        if (ret) {
-               dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
+               dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                goto clear_recovery;
        }
 
@@ -6373,11 +6423,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
 
        i40e_clear_pxe_mode(hw);
        ret = i40e_get_capabilities(pf);
-       if (ret) {
-               dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
-                        ret);
+       if (ret)
                goto end_core_reset;
-       }
 
        ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
                                hw->func_caps.num_rx_qp,
@@ -6418,12 +6465,16 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
                                       I40E_AQ_EVENT_LINK_UPDOWN |
                                       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
        if (ret)
-               dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret);
+               dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
        /* make sure our flow control settings are restored */
        ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
        if (ret)
-               dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret);
+               dev_info(&pf->pdev->dev, "set fc fail, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
        /* Rebuild the VSIs and VEBs that existed before reset.
         * They are still in our local switch element arrays, so only
@@ -6484,8 +6535,10 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
                msleep(75);
                ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
                if (ret)
-                       dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
-                                pf->hw.aq.asq_last_status);
+                       dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
        }
        /* reinit the misc interrupt */
        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -6647,8 +6700,8 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
        pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
 
        for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
-               if (pf->pending_vxlan_bitmap & (1 << i)) {
-                       pf->pending_vxlan_bitmap &= ~(1 << i);
+               if (pf->pending_vxlan_bitmap & BIT_ULL(i)) {
+                       pf->pending_vxlan_bitmap &= ~BIT_ULL(i);
                        port = pf->vxlan_ports[i];
                        if (port)
                                ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
@@ -6659,10 +6712,12 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
 
                        if (ret) {
                                dev_info(&pf->pdev->dev,
-                                        "%s vxlan port %d, index %d failed, err %d, aq_err %d\n",
+                                        "%s vxlan port %d, index %d failed, err %s aq_err %s\n",
                                         port ? "add" : "delete",
-                                        ntohs(port), i, ret,
-                                        pf->hw.aq.asq_last_status);
+                                        ntohs(port), i,
+                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_aq_str(&pf->hw,
+                                                   pf->hw.aq.asq_last_status));
                                pf->vxlan_ports[i] = 0;
                        }
                }
@@ -7459,7 +7514,7 @@ static int i40e_config_rss(struct i40e_pf *pf)
                        j = 0;
                /* lut = 4-byte sliding window of 4 lut entries */
                lut = (lut << 8) | (j &
-                        ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
+                        (BIT(pf->hw.func_caps.rss_table_entry_width) - 1));
                /* On i = 3, we have 4 entries in lut; write to the register */
                if ((i & 3) == 3)
                        wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
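
The HLUT register is 32 bits wide but each lookup-table entry is one byte, so the loop packs four masked queue indices into a sliding window and flushes it on every fourth iteration:

        /* i=0: lut =              q0
         * i=1: lut = (lut << 8) | q1
         * i=2: lut = (lut << 8) | q2
         * i=3: lut = (lut << 8) | q3 -> wr32(hw, I40E_PFQF_HLUT(0), lut)
         * each qN already masked to rss_table_entry_width bits
         */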
@@ -7533,7 +7588,7 @@ i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
        i40e_status status;
 
        /* Set the valid bit for this PF */
-       bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id);
+       bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
        bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
        bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
 
@@ -7567,8 +7622,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
        last_aq_status = pf->hw.aq.asq_last_status;
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "Cannot acquire NVM for read access, err %d: aq_err %d\n",
-                        ret, last_aq_status);
+                        "Cannot acquire NVM for read access, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, last_aq_status));
                goto bw_commit_out;
        }
 
@@ -7583,8 +7639,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
        last_aq_status = pf->hw.aq.asq_last_status;
        i40e_release_nvm(&pf->hw);
        if (ret) {
-               dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n",
-                        ret, last_aq_status);
+               dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, last_aq_status));
                goto bw_commit_out;
        }
 
@@ -7596,8 +7653,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
        last_aq_status = pf->hw.aq.asq_last_status;
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "Cannot acquire NVM for write access, err %d: aq_err %d\n",
-                        ret, last_aq_status);
+                        "Cannot acquire NVM for write access, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, last_aq_status));
                goto bw_commit_out;
        }
        /* Write it back out unchanged to initiate update NVM,
@@ -7615,8 +7673,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
        i40e_release_nvm(&pf->hw);
        if (ret)
                dev_info(&pf->pdev->dev,
-                        "BW settings NOT SAVED, err %d aq_err %d\n",
-                        ret, last_aq_status);
+                        "BW settings NOT SAVED, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, last_aq_status));
 bw_commit_out:
 
        return ret;
@@ -7662,7 +7721,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
        /* Depending on PF configurations, it is possible that the RSS
         * maximum might end up larger than the available queues
         */
-       pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+       pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
        pf->rss_size = 1;
        pf->rss_table_size = pf->hw.func_caps.rss_table_size;
        pf->rss_size_max = min_t(int, pf->rss_size_max,
@@ -7673,7 +7732,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
        }
 
        /* MFP mode enabled */
-       if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
+       if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
                pf->flags |= I40E_FLAG_MFP_ENABLED;
                dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
                if (i40e_get_npar_bw_setting(pf))
@@ -7812,7 +7871,7 @@ static int i40e_set_features(struct net_device *netdev,
        need_reset = i40e_set_ntuple(pf, features);
 
        if (need_reset)
-               i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+               i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
 
        return 0;
 }
@@ -7875,7 +7934,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
 
        /* New port: add it and mark its index in the bitmap */
        pf->vxlan_ports[next_idx] = port;
-       pf->pending_vxlan_bitmap |= (1 << next_idx);
+       pf->pending_vxlan_bitmap |= BIT_ULL(next_idx);
        pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
 
        dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port));
@@ -7906,7 +7965,7 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
                 * and make it pending
                 */
                pf->vxlan_ports[idx] = 0;
-               pf->pending_vxlan_bitmap |= (1 << idx);
+               pf->pending_vxlan_bitmap |= BIT_ULL(idx);
                pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
 
                dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
@@ -7981,7 +8040,6 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
        return err;
 }
 
-#ifdef HAVE_BRIDGE_ATTRIBS
 /**
  * i40e_ndo_bridge_setlink - Set the hardware bridge mode
  * @dev: the netdev being configured
@@ -7995,7 +8053,8 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
  * bridge mode enabled.
  **/
 static int i40e_ndo_bridge_setlink(struct net_device *dev,
-                                  struct nlmsghdr *nlh)
+                                  struct nlmsghdr *nlh,
+                                  u16 flags)
 {
        struct i40e_netdev_priv *np = netdev_priv(dev);
        struct i40e_vsi *vsi = np->vsi;
@@ -8066,14 +8125,9 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
  * Return the mode in which the hardware bridge is operating in
  * i.e VEB or VEPA.
  **/
-#ifdef HAVE_BRIDGE_FILTER
 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                                   struct net_device *dev,
                                   u32 filter_mask, int nlflags)
-#else
-static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-                                  struct net_device *dev, int nlflags)
-#endif /* HAVE_BRIDGE_FILTER */
 {
        struct i40e_netdev_priv *np = netdev_priv(dev);
        struct i40e_vsi *vsi = np->vsi;
@@ -8097,7 +8151,25 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
        return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
                                       nlflags, 0, 0, filter_mask, NULL);
 }
-#endif /* HAVE_BRIDGE_ATTRIBS */
+
+#define I40E_MAX_TUNNEL_HDR_LEN 80
+/**
+ * i40e_features_check - Validate encapsulated packet conforms to limits
+ * @skb: skb buff
+ * @dev: This physical port's netdev
+ * @features: Offload features that the stack believes apply
+ **/
+static netdev_features_t i40e_features_check(struct sk_buff *skb,
+                                            struct net_device *dev,
+                                            netdev_features_t features)
+{
+       if (skb->encapsulation &&
+           (skb_inner_mac_header(skb) - skb_transport_header(skb) >
+            I40E_MAX_TUNNEL_HDR_LEN))
+               return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+
+       return features;
+}
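
The new hook measures the outer headers of an encapsulated frame and, past 80 bytes, strips the offload bits so checksum and GSO fall back to software. An illustrative layout (VXLAN shown as one plausible encapsulation):

        /*   [outer eth][outer ip][udp][vxlan][inner eth][inner ip]...
         *                        ^ skb_transport_header
         *                                    ^ skb_inner_mac_header
         * span = inner mac - transport header; if it exceeds
         * I40E_MAX_TUNNEL_HDR_LEN (80), NETIF_F_ALL_CSUM and
         * NETIF_F_GSO_MASK are cleared from the advertised features.
         */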
 
 static const struct net_device_ops i40e_netdev_ops = {
        .ndo_open               = i40e_open,
@@ -8133,10 +8205,9 @@ static const struct net_device_ops i40e_netdev_ops = {
 #endif
        .ndo_get_phys_port_id   = i40e_get_phys_port_id,
        .ndo_fdb_add            = i40e_ndo_fdb_add,
-#ifdef HAVE_BRIDGE_ATTRIBS
+       .ndo_features_check     = i40e_features_check,
        .ndo_bridge_getlink     = i40e_ndo_bridge_getlink,
        .ndo_bridge_setlink     = i40e_ndo_bridge_setlink,
-#endif /* HAVE_BRIDGE_ATTRIBS */
 };
 
 /**
@@ -8304,8 +8375,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                ctxt.flags = I40E_AQ_VSI_TYPE_PF;
                if (ret) {
                        dev_info(&pf->pdev->dev,
-                                "couldn't get PF vsi config, err %d, aq_err %d\n",
-                                ret, pf->hw.aq.asq_last_status);
+                                "couldn't get PF vsi config, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
                        return -ENOENT;
                }
                vsi->info = ctxt.info;
@@ -8327,8 +8400,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                        ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
                        if (ret) {
                                dev_info(&pf->pdev->dev,
-                                        "update vsi failed, aq_err=%d\n",
-                                        pf->hw.aq.asq_last_status);
+                                        "update vsi failed, err %s aq_err %s\n",
+                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_aq_str(&pf->hw,
+                                                   pf->hw.aq.asq_last_status));
                                ret = -ENOENT;
                                goto err;
                        }
@@ -8345,9 +8420,11 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                        ret = i40e_vsi_config_tc(vsi, enabled_tc);
                        if (ret) {
                                dev_info(&pf->pdev->dev,
-                                        "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
-                                        enabled_tc, ret,
-                                        pf->hw.aq.asq_last_status);
+                                        "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
+                                        enabled_tc,
+                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_aq_str(&pf->hw,
+                                                   pf->hw.aq.asq_last_status));
                                ret = -ENOENT;
                        }
                }
@@ -8438,8 +8515,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
                if (ret) {
                        dev_info(&vsi->back->pdev->dev,
-                                "add vsi failed, aq_err=%d\n",
-                                vsi->back->hw.aq.asq_last_status);
+                                "add vsi failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
                        ret = -ENOENT;
                        goto err;
                }
@@ -8484,8 +8563,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
        ret = i40e_vsi_get_bw_info(vsi);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "couldn't get vsi bw info, err %d, aq_err %d\n",
-                        ret, pf->hw.aq.asq_last_status);
+                        "couldn't get vsi bw info, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                /* VSI is already added so not tearing that up */
                ret = 0;
        }
@@ -8658,7 +8738,7 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
        ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
        if (ret < 0) {
                dev_info(&pf->pdev->dev,
-                        "failed to get tracking for %d queues for VSI %d err=%d\n",
+                        "failed to get tracking for %d queues for VSI %d err %d\n",
                         vsi->alloc_queue_pairs, vsi->seid, ret);
                goto err_vsi;
        }
@@ -8896,8 +8976,9 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
                                                  &bw_data, NULL);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "query veb bw config failed, aq_err=%d\n",
-                        hw->aq.asq_last_status);
+                        "query veb bw config failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
                goto out;
        }
 
@@ -8905,8 +8986,9 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
                                                   &ets_data, NULL);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "query veb bw ets config failed, aq_err=%d\n",
-                        hw->aq.asq_last_status);
+                        "query veb bw ets config failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
                goto out;
        }
 
@@ -9090,36 +9172,40 @@ void i40e_veb_release(struct i40e_veb *veb)
  **/
 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
 {
+       struct i40e_pf *pf = veb->pf;
        bool is_default = false;
        bool is_cloud = false;
        int ret;
 
        /* get a VEB from the hardware */
-       ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
+       ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
                              veb->enabled_tc, is_default,
                              is_cloud, &veb->seid, NULL);
        if (ret) {
-               dev_info(&veb->pf->pdev->dev,
-                        "couldn't add VEB, err %d, aq_err %d\n",
-                        ret, veb->pf->hw.aq.asq_last_status);
+               dev_info(&pf->pdev->dev,
+                        "couldn't add VEB, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                return -EPERM;
        }
 
        /* get statistics counter */
-       ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
+       ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
                                         &veb->stats_idx, NULL, NULL, NULL);
        if (ret) {
-               dev_info(&veb->pf->pdev->dev,
-                        "couldn't get VEB statistics idx, err %d, aq_err %d\n",
-                        ret, veb->pf->hw.aq.asq_last_status);
+               dev_info(&pf->pdev->dev,
+                        "couldn't get VEB statistics idx, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                return -EPERM;
        }
        ret = i40e_veb_get_bw_info(veb);
        if (ret) {
-               dev_info(&veb->pf->pdev->dev,
-                        "couldn't get VEB bw info, err %d, aq_err %d\n",
-                        ret, veb->pf->hw.aq.asq_last_status);
-               i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
+               dev_info(&pf->pdev->dev,
+                        "couldn't get VEB bw info, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+               i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
                return -ENOENT;
        }
 
@@ -9325,8 +9411,10 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
                                                &next_seid, NULL);
                if (ret) {
                        dev_info(&pf->pdev->dev,
-                                "get switch config failed %d aq_err=%x\n",
-                                ret, pf->hw.aq.asq_last_status);
+                                "get switch config failed err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
                        kfree(aq_buf);
                        return -ENOENT;
                }
@@ -9367,8 +9455,9 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
        ret = i40e_fetch_switch_configuration(pf, false);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "couldn't fetch switch config, err %d, aq_err %d\n",
-                        ret, pf->hw.aq.asq_last_status);
+                        "couldn't fetch switch config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                return ret;
        }
        i40e_pf_reset_stats(pf);
@@ -9743,7 +9832,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        err = i40e_init_shared_code(hw);
        if (err) {
-               dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
+               dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
+                        err);
                goto err_pf_reset;
        }
 
@@ -9910,15 +10000,19 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                                       I40E_AQ_EVENT_LINK_UPDOWN |
                                       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
        if (err)
-               dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
+               dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, err),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
        if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
            (pf->hw.aq.fw_maj_ver < 4)) {
                msleep(75);
                err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
                if (err)
-                       dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
-                                pf->hw.aq.asq_last_status);
+                       dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, err),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
        }
        /* The main driver is (mostly) up and happy. We need to set this state
         * before setting up the misc vector or we get a race and the vector
@@ -10006,8 +10100,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* get the requested speeds from the fw */
        err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
        if (err)
-               dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n",
-                        err);
+               dev_info(&pf->pdev->dev,
+                        "get phy capabilities failed, err %s aq_err %s, advertised speed settings may not be correct\n",
+                        i40e_stat_str(&pf->hw, err),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
        pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
 
        /* print a string summarizing features */
index 554e49d02683c1783a56a2fa9235412e96bdcd3f..ce986af213d2847d9e2b41f86aa9812aad0054a3 100644 (file)
@@ -50,7 +50,7 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
        sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
                           I40E_GLNVM_GENS_SR_SIZE_SHIFT);
        /* Switching to words (sr_size contains power of 2KB) */
-       nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
+       nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
 
        /* Check if we are in the normal or blank NVM programming mode */
        fla = rd32(hw, I40E_GLNVM_FLA);
@@ -189,8 +189,8 @@ static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
        ret_code = i40e_poll_sr_srctl_done_bit(hw);
        if (!ret_code) {
                /* Write the address and start reading */
-               sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
-                        (1 << I40E_GLNVM_SRCTL_START_SHIFT);
+               sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+                        BIT(I40E_GLNVM_SRCTL_START_SHIFT);
                wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
 
                /* Poll I40E_GLNVM_SRCTL until the done bit is set */
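This hunk (apparently i40e_nvm.c, judging by i40e_init_nvm) makes two small changes: open-coded 1 << n becomes the kernel's BIT(n) helper, and offset is widened to u32 before the shift instead of casting the shifted result, so address bits cannot be produced in a narrower type and then truncated. A compilable sketch under assumed shift values; the real I40E_GLNVM_SRCTL layout is not reproduced here:

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's bit helper from linux/bits.h. */
#define BIT(nr)  (1UL << (nr))

#define SRCTL_ADDR_SHIFT  14   /* illustrative values, not the real */
#define SRCTL_START_SHIFT 30   /* register field positions          */

int main(void)
{
        uint16_t offset = 0x1234;

        /* Widen the operand before shifting, as the hunk now does, so a
         * large shift cannot push address bits past the promoted width. */
        uint32_t sr_reg = ((uint32_t)offset << SRCTL_ADDR_SHIFT) |
                          BIT(SRCTL_START_SHIFT);

        printf("sr_reg = 0x%08x\n", (unsigned)sr_reg);
        return 0;
}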
index 7b34f1e660eacf99699d411e3e498d6fdf85d37c..d52a9f7873b0c927a78accc7a25b23ab3867f153 100644 (file)
@@ -58,6 +58,8 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
 void i40e_idle_aq(struct i40e_hw *hw);
 bool i40e_check_asq_alive(struct i40e_hw *hw);
 i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
 
 u32 i40e_led_get(struct i40e_hw *hw);
 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
index a92b7725dec3910964e5807a88d4f31622b55763..8c40d6ea15fda5cf4283769ab9d5753c67298917 100644 (file)
@@ -43,9 +43,8 @@
 #define I40E_PTP_10GB_INCVAL 0x0333333333ULL
 #define I40E_PTP_1GB_INCVAL  0x2000000000ULL
 
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1  (0x1 << \
-                                       I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2  (0x2 << \
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1  BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2  (2 << \
                                        I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
 
 /**
@@ -357,7 +356,7 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
 
        prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
 
-       if (!(prttsyn_stat & (1 << index)))
+       if (!(prttsyn_stat & BIT(index)))
                return;
 
        lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
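Note that only the V1 define could become BIT(): BIT(shift) encodes field value 1 at that shift, while V2 encodes field value 2, so it keeps the explicit 2 << shift form. A tiny sketch with an assumed shift:

#include <stdio.h>

#define BIT(nr) (1UL << (nr))

#define TSYNTYPE_SHIFT 26                     /* illustrative shift */
#define TSYNTYPE_V1    BIT(TSYNTYPE_SHIFT)    /* field value 1: exactly that bit */
#define TSYNTYPE_V2    (2 << TSYNTYPE_SHIFT)  /* field value 2: a different bit */

int main(void)
{
        /* BIT() means "value 1 at this shift"; any other field value
         * still needs an explicit shift, which is why V2 was left alone. */
        printf("V1 = 0x%08lx\n", (unsigned long)TSYNTYPE_V1);
        printf("V2 = 0x%08lx\n", (unsigned long)TSYNTYPE_V2);
        return 0;
}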
index 9a4f2bc70cd2cb5494576f5530a7447502cd3c91..330e4ef43cd8fafc9a5a8985b0b713e9aec93e49 100644 (file)
@@ -464,7 +464,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
        error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
                I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
 
-       if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+       if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
                if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
                    (I40E_DEBUG_FD & pf->hw.debug_mask))
                        dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
@@ -509,8 +509,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
                        dev_info(&pdev->dev,
                                "FD filter programming failed due to incorrect filter parameters\n");
                }
-       } else if (error ==
-                         (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
+       } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
                if (I40E_DEBUG_FD & pf->hw.debug_mask)
                        dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
                                 rx_desc->wb.qword0.hi_dword.fd_id);
@@ -892,7 +891,7 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
         *  20-1249MB/s bulk   (8000 ints/s)
         */
        bytes_per_int = rc->total_bytes / rc->itr;
-       switch (rc->itr) {
+       switch (new_latency_range) {
        case I40E_LOWEST_LATENCY:
                if (bytes_per_int > 10)
                        new_latency_range = I40E_LOW_LATENCY;
@@ -905,9 +904,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
                break;
        case I40E_BULK_LATENCY:
                if (bytes_per_int <= 20)
-                       rc->latency_range = I40E_LOW_LATENCY;
+                       new_latency_range = I40E_LOW_LATENCY;
+               break;
+       default:
+               if (bytes_per_int <= 20)
+                       new_latency_range = I40E_LOW_LATENCY;
                break;
        }
+       rc->latency_range = new_latency_range;
 
        switch (new_latency_range) {
        case I40E_LOWEST_LATENCY:
@@ -923,41 +927,13 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
                break;
        }
 
-       if (new_itr != rc->itr) {
-               /* do an exponential smoothing */
-               new_itr = (10 * new_itr * rc->itr) /
-                         ((9 * new_itr) + rc->itr);
-               rc->itr = new_itr & I40E_MAX_ITR;
-       }
+       if (new_itr != rc->itr)
+               rc->itr = new_itr;
 
        rc->total_bytes = 0;
        rc->total_packets = 0;
 }
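The hunks above rework the classifier: the switch now keys on the current latency range rather than the raw ITR value, a default arm is added, the result is stored back unconditionally, and the old exponential smoothing of the ITR is dropped. A standalone sketch of the resulting one-step state machine; the LOW_LATENCY arm is not fully visible in this hunk, so its thresholds are assumed to mirror the 10/20 bytes-per-interrupt boundaries used by the visible arms:

#include <stdio.h>

enum latency_range { LOWEST_LATENCY, LOW_LATENCY, BULK_LATENCY };

/* One step per interrupt-throttling update, driven by bytes per interrupt. */
static enum latency_range next_range(enum latency_range cur, unsigned bytes_per_int)
{
        switch (cur) {
        case LOWEST_LATENCY:
                if (bytes_per_int > 10)
                        return LOW_LATENCY;
                break;
        case LOW_LATENCY:
                if (bytes_per_int > 20)
                        return BULK_LATENCY;
                else if (bytes_per_int <= 10)
                        return LOWEST_LATENCY;
                break;
        case BULK_LATENCY:
        default:
                if (bytes_per_int <= 20)
                        return LOW_LATENCY;
                break;
        }
        return cur;
}

int main(void)
{
        enum latency_range r = LOWEST_LATENCY;

        r = next_range(r, 15);  /* -> LOW_LATENCY */
        r = next_range(r, 25);  /* -> BULK_LATENCY */
        r = next_range(r, 5);   /* -> LOW_LATENCY */
        printf("final range = %d\n", (int)r);
        return 0;
}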
 
-/**
- * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
- * @q_vector: the vector to adjust
- **/
-static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
-{
-       u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
-       struct i40e_hw *hw = &q_vector->vsi->back->hw;
-       u32 reg_addr;
-       u16 old_itr;
-
-       reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
-       old_itr = q_vector->rx.itr;
-       i40e_set_new_dynamic_itr(&q_vector->rx);
-       if (old_itr != q_vector->rx.itr)
-               wr32(hw, reg_addr, q_vector->rx.itr);
-
-       reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
-       old_itr = q_vector->tx.itr;
-       i40e_set_new_dynamic_itr(&q_vector->tx);
-       if (old_itr != q_vector->tx.itr)
-               wr32(hw, reg_addr, q_vector->tx.itr);
-}
-
 /**
  * i40e_clean_programming_status - clean the programming status descriptor
  * @rx_ring: the rx ring that has this descriptor
@@ -1386,7 +1362,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                return;
 
        /* did the hardware decode the packet and checksum? */
-       if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+       if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
                return;
 
        /* both known and outer_ip must be set for the below code to work */
@@ -1401,25 +1377,25 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                ipv6 = true;
 
        if (ipv4 &&
-           (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
-                        (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+           (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+                        BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
                goto checksum_fail;
 
        /* likely incorrect csum if alternate IP extension headers found */
        if (ipv6 &&
-           rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+           rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
                /* don't increment checksum err here, non-fatal err */
                return;
 
        /* there was some L4 error, count error and punt packet to the stack */
-       if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+       if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
                goto checksum_fail;
 
        /* handle packets that were not able to be checksummed due
         * to arrival speed, in this case the stack can compute
         * the csum.
         */
-       if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
+       if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
                return;
 
        /* If VXLAN traffic has an outer UDPv4 checksum we need to check
@@ -1543,7 +1519,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
                        I40E_RXD_QW1_STATUS_SHIFT;
 
-               if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+               if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
                        break;
 
                /* This memory barrier is needed to keep us from reading
@@ -1584,8 +1560,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 
                rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
                           I40E_RXD_QW1_ERROR_SHIFT;
-               rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
-               rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+               rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+               rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
                rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
                           I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1637,7 +1613,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                I40E_RX_INCREMENT(rx_ring, i);
 
                if (unlikely(
-                   !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+                   !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
                        struct i40e_rx_buffer *next_buffer;
 
                        next_buffer = &rx_ring->rx_bi[i];
@@ -1647,7 +1623,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                }
 
                /* ERR_MASK will only have valid bits if EOP set */
-               if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+               if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
                        dev_kfree_skb_any(skb);
                        continue;
                }
@@ -1669,7 +1645,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 
                i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-               vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+               vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
                         ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
                         : 0;
 #ifdef I40E_FCOE
@@ -1730,7 +1706,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
                rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
                        I40E_RXD_QW1_STATUS_SHIFT;
 
-               if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+               if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
                        break;
 
                /* This memory barrier is needed to keep us from reading
@@ -1753,7 +1729,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 
                rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
                           I40E_RXD_QW1_ERROR_SHIFT;
-               rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+               rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
                rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
                           I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1771,13 +1747,13 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
                I40E_RX_INCREMENT(rx_ring, i);
 
                if (unlikely(
-                   !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+                   !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
                        rx_ring->rx_stats.non_eop_descs++;
                        continue;
                }
 
                /* ERR_MASK will only have valid bits if EOP set */
-               if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+               if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
                        dev_kfree_skb_any(skb);
                        /* TODO: shouldn't we increment a counter indicating the
                         * drop?
@@ -1802,7 +1778,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 
                i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-               vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+               vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
                         ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
                         : 0;
 #ifdef I40E_FCOE
@@ -1826,6 +1802,68 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
        return total_rx_packets;
 }
 
+/**
+ * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * @vsi: the VSI we care about
+ * @q_vector: q_vector for which ITR is being updated and the interrupt re-enabled
+ **/
+static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+                                         struct i40e_q_vector *q_vector)
+{
+       struct i40e_hw *hw = &vsi->back->hw;
+       u16 old_itr;
+       int vector;
+       u32 val;
+
+       vector = (q_vector->v_idx + vsi->base_vector);
+       if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+               old_itr = q_vector->rx.itr;
+               i40e_set_new_dynamic_itr(&q_vector->rx);
+               if (old_itr != q_vector->rx.itr) {
+                       val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+                             I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+                             (I40E_RX_ITR <<
+                              I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+                             (q_vector->rx.itr <<
+                              I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+               } else {
+                       val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+                             I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+                             (I40E_ITR_NONE <<
+                              I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+               }
+               if (!test_bit(__I40E_DOWN, &vsi->state))
+                       wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
+       } else {
+               i40e_irq_dynamic_enable(vsi, vector);
+       }
+       if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+               old_itr = q_vector->tx.itr;
+               i40e_set_new_dynamic_itr(&q_vector->tx);
+               if (old_itr != q_vector->tx.itr) {
+                       val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+                               I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+                               (I40E_TX_ITR <<
+                                  I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+                               (q_vector->tx.itr <<
+                                  I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+               } else {
+                       val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+                               I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+                               (I40E_ITR_NONE <<
+                                  I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+               }
+               if (!test_bit(__I40E_DOWN, &vsi->state))
+                       wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
+       } else {
+               i40e_irq_dynamic_enable(vsi, vector);
+       }
+}
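i40e_update_enable_itr() folds the old separate ITR write and interrupt re-enable into a single register write per direction: when the ITR changed, the new interval is pushed together with the enable bits; otherwise ITR_NONE is selected so the stored interval is left untouched. A rough sketch of composing such a control word; the shifts and field positions below are stand-ins, not the real I40E_PFINT_DYN_CTLN_* layout:

#include <stdio.h>
#include <stdint.h>

#define DYN_CTL_INTENA       (1u << 0)  /* re-enable the interrupt */
#define DYN_CTL_CLEARPBA     (1u << 1)  /* clear the pending-bit array */
#define DYN_CTL_ITR_SHIFT    3          /* which ITR index to load */
#define DYN_CTL_INTRVL_SHIFT 5          /* new interval value */
#define ITR_NONE             3          /* "don't update any ITR" index */

static uint32_t dyn_ctl_val(unsigned itr_idx, unsigned new_interval, int changed)
{
        uint32_t val = DYN_CTL_INTENA | DYN_CTL_CLEARPBA;

        if (changed)    /* push the new interval along with the enable */
                val |= (itr_idx << DYN_CTL_ITR_SHIFT) |
                       (new_interval << DYN_CTL_INTRVL_SHIFT);
        else            /* enable only; ITR_NONE leaves the interval alone */
                val |= ITR_NONE << DYN_CTL_ITR_SHIFT;
        return val;
}

int main(void)
{
        printf("changed:   0x%08x\n", dyn_ctl_val(0 /* RX */, 62, 1));
        printf("unchanged: 0x%08x\n", dyn_ctl_val(0 /* RX */, 62, 0));
        return 0;
}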
+
 /**
  * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
  * @napi: napi struct with our devices info in it
@@ -1882,33 +1920,24 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
 
        /* Work is done so exit the polling mode and re-enable the interrupt */
        napi_complete(napi);
-       if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
-           ITR_IS_DYNAMIC(vsi->tx_itr_setting))
-               i40e_update_dynamic_itr(q_vector);
-
-       if (!test_bit(__I40E_DOWN, &vsi->state)) {
-               if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
-                       i40e_irq_dynamic_enable(vsi,
-                                       q_vector->v_idx + vsi->base_vector);
-               } else {
-                       struct i40e_hw *hw = &vsi->back->hw;
-                       /* We re-enable the queue 0 cause, but
-                        * don't worry about dynamic_enable
-                        * because we left it on for the other
-                        * possible interrupts during napi
-                        */
-                       u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
-                       qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
-                       wr32(hw, I40E_QINT_RQCTL(0), qval);
-
-                       qval = rd32(hw, I40E_QINT_TQCTL(0));
-                       qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
-                       wr32(hw, I40E_QINT_TQCTL(0), qval);
-
-                       i40e_irq_dynamic_enable_icr0(vsi->back);
-               }
+       if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+               i40e_update_enable_itr(vsi, q_vector);
+       } else { /* Legacy mode */
+               struct i40e_hw *hw = &vsi->back->hw;
+               /* Re-enable the queue-0 interrupt cause; dynamic_enable
+                * can be left alone because it stayed on for the other
+                * possible interrupts during NAPI.
+                */
+               u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
+                          I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+
+               wr32(hw, I40E_QINT_RQCTL(0), qval);
+               qval = rd32(hw, I40E_QINT_TQCTL(0)) |
+                      I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+               wr32(hw, I40E_QINT_TQCTL(0), qval);
+               i40e_irq_dynamic_enable_icr0(vsi->back);
        }
-
        return 0;
 }
 
@@ -2616,6 +2645,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
            netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
                                                   tx_ring->queue_index)))
                writel(i, tx_ring->tail);
+       else
+               prefetchw(tx_desc + 1);
 
        return;
 
index 0dc48dc9ca61922a4b11bd0b7624f07c153c603a..429833c47245faa6cd3ad7d1e757b9668c169b05 100644 (file)
@@ -66,17 +66,17 @@ enum i40e_dyn_idx_t {
 
 /* Supported RSS offloads */
 #define I40E_DEFAULT_RSS_HENA ( \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
 
 /* Supported Rx Buffer Sizes */
 #define I40E_RXBUFFER_512   512    /* Used for packet split */
@@ -129,17 +129,17 @@ enum i40e_dyn_idx_t {
 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 #define I40E_MIN_DESC_PENDING  4
 
-#define I40E_TX_FLAGS_CSUM             (u32)(1)
-#define I40E_TX_FLAGS_HW_VLAN          (u32)(1 << 1)
-#define I40E_TX_FLAGS_SW_VLAN          (u32)(1 << 2)
-#define I40E_TX_FLAGS_TSO              (u32)(1 << 3)
-#define I40E_TX_FLAGS_IPV4             (u32)(1 << 4)
-#define I40E_TX_FLAGS_IPV6             (u32)(1 << 5)
-#define I40E_TX_FLAGS_FCCRC            (u32)(1 << 6)
-#define I40E_TX_FLAGS_FSO              (u32)(1 << 7)
-#define I40E_TX_FLAGS_TSYN             (u32)(1 << 8)
-#define I40E_TX_FLAGS_FD_SB            (u32)(1 << 9)
-#define I40E_TX_FLAGS_VXLAN_TUNNEL     (u32)(1 << 10)
+#define I40E_TX_FLAGS_CSUM             BIT(0)
+#define I40E_TX_FLAGS_HW_VLAN          BIT(1)
+#define I40E_TX_FLAGS_SW_VLAN          BIT(2)
+#define I40E_TX_FLAGS_TSO              BIT(3)
+#define I40E_TX_FLAGS_IPV4             BIT(4)
+#define I40E_TX_FLAGS_IPV6             BIT(5)
+#define I40E_TX_FLAGS_FCCRC            BIT(6)
+#define I40E_TX_FLAGS_FSO              BIT(7)
+#define I40E_TX_FLAGS_TSYN             BIT(8)
+#define I40E_TX_FLAGS_FD_SB            BIT(9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL     BIT(10)
 #define I40E_TX_FLAGS_VLAN_MASK                0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT  29
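The HENA mask above is a 64-bit bitmap indexed by packet-classifier type, and several PCTYPE indices sit above bit 31, which is why the original code used (u64)1 << n and the replacement is BIT_ULL rather than BIT. A short sketch with assumed indices:

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(nr) (1ULL << (nr))

/* Illustrative packet-classifier type indices; the real IPv6 PCTYPEs
 * sit above bit 31, so the mask must be built in 64-bit arithmetic. */
enum { PCTYPE_NONF_IPV4_UDP = 31, PCTYPE_NONF_IPV6_UDP = 41 };

#define DEFAULT_RSS_HENA \
        (BIT_ULL(PCTYPE_NONF_IPV4_UDP) | BIT_ULL(PCTYPE_NONF_IPV6_UDP))

int main(void)
{
        uint64_t hena = DEFAULT_RSS_HENA;

        /* 1 << 41 would overflow a 32-bit int; 1ULL << 41 is well defined. */
        printf("hena = 0x%016llx\n", (unsigned long long)hena);
        return 0;
}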
index 9a5a75b1e2bc053b50bec13adde2fd3aa4848595..a20128b82b62511ca55ec984d6587b3050761c82 100644 (file)
@@ -213,7 +213,17 @@ struct i40e_hw_capabilities {
        bool dcb;
        bool fcoe;
        bool iscsi; /* Indicates iSCSI enabled */
-       bool mfp_mode_1;
+       bool flex10_enable;
+       bool flex10_capable;
+       u32  flex10_mode;
+#define I40E_FLEX10_MODE_UNKNOWN       0x0
+#define I40E_FLEX10_MODE_DCC           0x1
+#define I40E_FLEX10_MODE_DCI           0x2
+
+       u32 flex10_status;
+#define I40E_FLEX10_STATUS_DCC_ERROR   0x1
+#define I40E_FLEX10_STATUS_VC_MODE     0x2
+
        bool mgmt_cem;
        bool ieee_1588;
        bool iwarp;
@@ -487,6 +497,7 @@ struct i40e_hw {
 
        /* debug mask */
        u32 debug_mask;
+       char err_str[16];
 };
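The new err_str[16] member is the scratch buffer used by the i40e_stat_str()/i40e_aq_str() fallback paths elsewhere in this diff. Sixteen bytes is comfortably enough: the widest "%d" rendering of a 32-bit code is "-2147483648", eleven characters plus the terminator, as this check shows:

#include <stdio.h>
#include <limits.h>

int main(void)
{
        char err_str[16];

        /* Worst case for "%d" of a 32-bit int: "-2147483648", 11 chars + NUL. */
        int n = snprintf(err_str, sizeof(err_str), "%d", INT_MIN);

        printf("\"%s\" used %d of %zu bytes\n", err_str, n, sizeof(err_str));
        return 0;
}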
 
 static inline bool i40e_is_vf(struct i40e_hw *hw)
@@ -600,7 +611,7 @@ enum i40e_rx_desc_status_bits {
 };
 
 #define I40E_RXD_QW1_STATUS_SHIFT      0
-#define I40E_RXD_QW1_STATUS_MASK       (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+#define I40E_RXD_QW1_STATUS_MASK       ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
                                         << I40E_RXD_QW1_STATUS_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT   I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
@@ -608,8 +619,8 @@ enum i40e_rx_desc_status_bits {
                                             I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT  I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK     (0x1UL << \
-                                        I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
+                                   BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
 
 enum i40e_rx_desc_fltstat_values {
        I40E_RX_DESC_FLTSTAT_NO_DATA    = 0,
@@ -743,8 +754,7 @@ enum i40e_rx_ptype_payload_layer {
                                         I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
 
 #define I40E_RXD_QW1_LENGTH_SPH_SHIFT  63
-#define I40E_RXD_QW1_LENGTH_SPH_MASK   (0x1ULL << \
-                                        I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+#define I40E_RXD_QW1_LENGTH_SPH_MASK   BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
 
 enum i40e_rx_desc_ext_status_bits {
        /* Note: These are predefined bit offsets */
@@ -920,12 +930,12 @@ enum i40e_tx_ctx_desc_eipt_offload {
 #define I40E_TXD_CTX_QW0_NATT_SHIFT    9
 #define I40E_TXD_CTX_QW0_NATT_MASK     (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
-#define I40E_TXD_CTX_UDP_TUNNELING     (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_UDP_TUNNELING     BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
 #define I40E_TXD_CTX_GRE_TUNNELING     (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
 #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT       11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK        (0x1ULL << \
-                                        I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
+                                      BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
 
 #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST      I40E_TXD_CTX_QW0_EIP_NOINC_MASK
 
@@ -990,8 +1000,8 @@ enum i40e_filter_program_desc_fd_status {
 };
 
 #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT       23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK        (0x1FFUL << \
-                                        I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
+                                      BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CMD_SHIFT    4
 #define I40E_TXD_FLTR_QW1_CMD_MASK     (0xFFFFULL << \
@@ -1009,8 +1019,7 @@ enum i40e_filter_program_desc_pcmd {
 #define I40E_TXD_FLTR_QW1_DEST_MASK    (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT        (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
-                                        I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT      (0x9ULL + \
                                                 I40E_TXD_FLTR_QW1_CMD_SHIFT)
@@ -1134,6 +1143,8 @@ struct i40e_hw_port_stats {
        u64 fd_atr_match;
        u64 fd_sb_match;
        u64 fd_atr_tunnel_match;
+       u32 fd_atr_status;
+       u32 fd_sb_status;
        /* EEE LPI */
        u32 tx_lpi_status;
        u32 rx_lpi_status;
index 2d20af290fbf20bc9fc0dbdc88486fea61ec978f..a7ab463b44746a5f74f6d7339b060df9c235c4be 100644 (file)
@@ -110,7 +110,9 @@ struct i40e_virtchnl_msg {
  * error regardless of version mismatch.
  */
 #define I40E_VIRTCHNL_VERSION_MAJOR            1
-#define I40E_VIRTCHNL_VERSION_MINOR            0
+#define I40E_VIRTCHNL_VERSION_MINOR            1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
 struct i40e_virtchnl_version_info {
        u32 major;
        u32 minor;
@@ -129,7 +131,8 @@ struct i40e_virtchnl_version_info {
  */
 
 /* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * VF sends this request to PF with no parameters
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with a u32 bitmap of its capabilities
  * PF responds with an indirect message containing
  * i40e_virtchnl_vf_resource and one or more
  * i40e_virtchnl_vsi_resource structures.
@@ -143,9 +146,12 @@ struct i40e_virtchnl_vsi_resource {
        u8 default_mac_addr[ETH_ALEN];
 };
 /* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2    0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE  0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN  0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2            0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP         0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE          0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ                0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG       0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN          0x00010000
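Together with the version bump above, these flags let a v1.1 VF report what it can do: the GET_VF_RESOURCES request now carries a u32 capability bitmap, while a v1.0 VF sends no payload and the PF assumes a fixed legacy set (L2 + RSS_REG + VLAN, per the i40e_vc_get_vf_resources_msg hunk below). A sketch of that negotiation rule using the flag values from this hunk:

#include <stdio.h>
#include <stdint.h>

#define VF_OFFLOAD_L2      0x00000001
#define VF_OFFLOAD_RSS_REG 0x00000010
#define VF_OFFLOAD_VLAN    0x00010000

struct version_info { uint32_t major, minor; };

/* v1.1 and later: trust the reported bitmap; v1.0: assume the legacy set. */
static uint32_t effective_caps(struct version_info vf_ver, uint32_t reported)
{
        if (vf_ver.major == 1 && vf_ver.minor >= 1)
                return reported;
        return VF_OFFLOAD_L2 | VF_OFFLOAD_RSS_REG | VF_OFFLOAD_VLAN;
}

int main(void)
{
        struct version_info v10 = { 1, 0 }, v11 = { 1, 1 };

        printf("v1.0 caps: 0x%08x\n", (unsigned)effective_caps(v10, 0));
        printf("v1.1 caps: 0x%08x\n", (unsigned)effective_caps(v11, VF_OFFLOAD_L2));
        return 0;
}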
 
 struct i40e_virtchnl_vf_resource {
        u16 num_vsis;
index 23f95cdbdfcc2c20d5913fbab3a2b71a1bb61064..d29d4062addf51141dbeefc152d1613b8171c981 100644 (file)
@@ -160,13 +160,8 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
  **/
 static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
 {
-       struct i40e_hw *hw = &pf->hw;
-       u32 reg;
-
-       reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
-       reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
-       wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
-       i40e_flush(hw);
+       i40e_vc_notify_vf_reset(vf);
+       i40e_reset_vf(vf, false);
 }
 
 /**
@@ -282,16 +277,14 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
        }
        tempmap = vecmap->rxq_map;
        for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
-               linklistmap |= (1 <<
-                               (I40E_VIRTCHNL_SUPPORTED_QTYPES *
-                                vsi_queue_id));
+               linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
+                                   vsi_queue_id));
        }
 
        tempmap = vecmap->txq_map;
        for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
-               linklistmap |= (1 <<
-                               (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
-                                + 1));
+               linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
+                                    vsi_queue_id + 1));
        }
 
        next_q = find_first_bit(&linklistmap,
@@ -337,7 +330,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
                reg = (vector_id) |
                    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
                    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
-                   (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+                   BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
                    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
                wr32(hw, reg_idx, reg);
        }
@@ -542,11 +535,13 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
                if (vf->port_vlan_id)
                        i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
                f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
-                                   vf->port_vlan_id, true, false);
+                                   vf->port_vlan_id ? vf->port_vlan_id : -1,
+                                   true, false);
                if (!f)
                        dev_info(&pf->pdev->dev,
                                 "Could not allocate VF MAC addr\n");
-               f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
+               f = i40e_add_filter(vsi, brdcast,
+                                   vf->port_vlan_id ? vf->port_vlan_id : -1,
                                    true, false);
                if (!f)
                        dev_info(&pf->pdev->dev,
@@ -835,6 +830,7 @@ complete_reset:
        i40e_alloc_vf_res(vf);
        i40e_enable_vf_mappings(vf);
        set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+       clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
 
        /* tell the VF the reset is done */
        wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
@@ -899,7 +895,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
                for (vf_id = 0; vf_id < tmp; vf_id++) {
                        reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
                        bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
-                       wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+                       wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
                }
        }
        clear_bit(__I40E_VF_DISABLE, &pf->state);
@@ -1123,12 +1119,16 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
  *
  * called from the VF to request the API version used by the PF
  **/
-static int i40e_vc_get_version_msg(struct i40e_vf *vf)
+static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
 {
        struct i40e_virtchnl_version_info info = {
                I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
        };
 
+       vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg;
+       /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
+       if (VF_IS_V10(vf))
+               info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
        return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
                                      I40E_SUCCESS, (u8 *)&info,
                                      sizeof(struct
@@ -1143,7 +1143,7 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf)
  *
  * called from the VF to request its resources
  **/
-static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
+static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
 {
        struct i40e_virtchnl_vf_resource *vfres = NULL;
        struct i40e_pf *pf = vf->pf;
@@ -1167,11 +1167,18 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
                len = 0;
                goto err;
        }
+       if (VF_IS_V11(vf))
+               vf->driver_caps = *(u32 *)msg;
+       else
+               vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+                                 I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+                                 I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
 
        vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
        vsi = pf->vsi[vf->lan_vsi_idx];
        if (!vsi->info.pvid)
-               vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+               vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
+                                          I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
 
        vfres->num_vsis = num_vsis;
        vfres->num_queue_pairs = vf->num_queue_pairs;
@@ -1773,9 +1780,14 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
                valid_len = sizeof(struct i40e_virtchnl_version_info);
                break;
        case I40E_VIRTCHNL_OP_RESET_VF:
-       case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
                valid_len = 0;
                break;
+       case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+               if (VF_IS_V11(vf))
+                       valid_len = sizeof(u32);
+               else
+                       valid_len = 0;
+               break;
        case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
                valid_len = sizeof(struct i40e_virtchnl_txq_info);
                break;
@@ -1888,10 +1900,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
 
        switch (v_opcode) {
        case I40E_VIRTCHNL_OP_VERSION:
-               ret = i40e_vc_get_version_msg(vf);
+               ret = i40e_vc_get_version_msg(vf, msg);
                break;
        case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
-               ret = i40e_vc_get_vf_resources_msg(vf);
+               ret = i40e_vc_get_vf_resources_msg(vf, msg);
                break;
        case I40E_VIRTCHNL_OP_RESET_VF:
                i40e_vc_reset_vf_msg(vf);
@@ -1969,9 +1981,9 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
                /* read GLGEN_VFLRSTAT register to find out the flr VFs */
                vf = &pf->vf[vf_id];
                reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
-               if (reg & (1 << bit_idx)) {
+               if (reg & BIT(bit_idx)) {
                        /* clear the bit in GLGEN_VFLRSTAT */
-                       wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+                       wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
 
                        if (!test_bit(__I40E_DOWN, &pf->state))
                                i40e_reset_vf(vf, true);
@@ -2023,7 +2035,8 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
        }
 
        /* delete the temporary mac address */
-       i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
+       i40e_del_filter(vsi, vf->default_lan_addr.addr,
+                       vf->port_vlan_id ? vf->port_vlan_id : -1,
                        true, false);
 
        /* Delete all the filters for this VSI - we're going to kill it
@@ -2088,6 +2101,10 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
                goto error_pvid;
        }
 
+       if (vsi->info.pvid == (vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)))
+               /* duplicate request, so just return success */
+               goto error_pvid;
+
        if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
                dev_err(&pf->pdev->dev,
                        "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
index 09043c1aae5435109fcd2bab8b4059c52c105961..736f6f08b4f26c98042375db16767eefca0fbe66 100644 (file)
@@ -42,6 +42,9 @@
 #define I40E_VLAN_MASK                 0xFFF
 #define I40E_PRIORITY_MASK             0x7000
 
+#define VF_IS_V10(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 0))
+#define VF_IS_V11(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 1))
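A small usage sketch of these version-test macros, with a stripped-down stand-in for struct i40e_vf; it mirrors how the validation hunk above sizes the GET_VF_RESOURCES payload by negotiated version:

#include <stdio.h>

struct vf {
        struct { unsigned major, minor; } vf_ver;
};

#define VF_IS_V10(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 0))
#define VF_IS_V11(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 1))

int main(void)
{
        struct vf legacy = { { 1, 0 } }, caps = { { 1, 1 } };

        /* A v1.1 GET_VF_RESOURCES request carries a u32 capability word;
         * a v1.0 request carries no payload. */
        printf("v1.0 valid_len: %d\n", VF_IS_V11(&legacy) ? 4 : 0);
        printf("v1.1 valid_len: %d\n", VF_IS_V11(&caps) ? 4 : 0);
        return 0;
}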
+
 /* Various queue ctrls */
 enum i40e_queue_ctrl {
        I40E_QUEUE_CTRL_UNKNOWN = 0,
@@ -75,6 +78,8 @@ struct i40e_vf {
        u16 vf_id;
        /* all VF vsis connect to the same parent */
        enum i40e_switch_element_types parent_type;
+       struct i40e_virtchnl_version_info vf_ver;
+       u32 driver_caps; /* reported by VF driver */
 
        /* VF Port Extender (PE) stag if used */
        u16 stag;
index e715bccfb5d256c055a99163a1fd0d177c190754..d5bd6f06692137819e557fc719df2f981253daa3 100644 (file)
@@ -34,7 +34,7 @@
  */
 
 #define I40E_FW_API_VERSION_MAJOR      0x0001
-#define I40E_FW_API_VERSION_MINOR      0x0002
+#define I40E_FW_API_VERSION_MINOR      0x0004
 #define I40E_FW_API_VERSION_A0_MINOR  0x0000
 
 struct i40e_aq_desc {
@@ -133,12 +133,7 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_list_func_capabilities     = 0x000A,
        i40e_aqc_opc_list_dev_capabilities      = 0x000B,
 
-       i40e_aqc_opc_set_cppm_configuration     = 0x0103,
-       i40e_aqc_opc_set_arp_proxy_entry        = 0x0104,
-       i40e_aqc_opc_set_ns_proxy_entry         = 0x0105,
-
        /* LAA */
-       i40e_aqc_opc_mng_laa            = 0x0106,   /* AQ obsolete */
        i40e_aqc_opc_mac_address_read   = 0x0107,
        i40e_aqc_opc_mac_address_write  = 0x0108,
 
@@ -260,7 +255,6 @@ enum i40e_admin_queue_opc {
        /* Tunnel commands */
        i40e_aqc_opc_add_udp_tunnel     = 0x0B00,
        i40e_aqc_opc_del_udp_tunnel     = 0x0B01,
-       i40e_aqc_opc_tunnel_key_structure       = 0x0B10,
 
        /* Async Events */
        i40e_aqc_opc_event_lan_overflow         = 0x1001,
@@ -272,8 +266,6 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_oem_ocbb_initialize        = 0xFE03,
 
        /* debug commands */
-       i40e_aqc_opc_debug_get_deviceid         = 0xFF00,
-       i40e_aqc_opc_debug_set_mode             = 0xFF01,
        i40e_aqc_opc_debug_read_reg             = 0xFF03,
        i40e_aqc_opc_debug_write_reg            = 0xFF04,
        i40e_aqc_opc_debug_modify_reg           = 0xFF07,
@@ -507,7 +499,8 @@ struct i40e_aqc_mac_address_read {
 #define I40E_AQC_SAN_ADDR_VALID                0x20
 #define I40E_AQC_PORT_ADDR_VALID       0x40
 #define I40E_AQC_WOL_ADDR_VALID                0x80
-#define I40E_AQC_ADDR_VALID_MASK       0xf0
+#define I40E_AQC_MC_MAG_EN_VALID       0x100
+#define I40E_AQC_ADDR_VALID_MASK       0x1F0
        u8      reserved[6];
        __le32  addr_high;
        __le32  addr_low;
@@ -530,7 +523,9 @@ struct i40e_aqc_mac_address_write {
 #define I40E_AQC_WRITE_TYPE_LAA_ONLY   0x0000
 #define I40E_AQC_WRITE_TYPE_LAA_WOL    0x4000
 #define I40E_AQC_WRITE_TYPE_PORT       0x8000
-#define I40E_AQC_WRITE_TYPE_MASK       0xc000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG      0xC000
+#define I40E_AQC_WRITE_TYPE_MASK       0xC000
+
        __le16  mac_sah;
        __le32  mac_sal;
        u8      reserved[8];
@@ -1066,6 +1061,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
        __le16  seid;
 #define I40E_AQC_VSI_PROM_CMD_SEID_MASK                0x3FF
        __le16  vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_MASK             0x0FFF
 #define I40E_AQC_SET_VSI_VLAN_VALID            0x8000
        u8      reserved[8];
 };
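The mac_address_read change above adds a new valid bit (MC_MAG_EN at 0x100) one position past the old 0xF0 mask, so the mask is widened to 0x1F0; with the old mask the new bit would be silently dropped. A quick check:

#include <stdio.h>

#define SAN_ADDR_VALID   0x20
#define PORT_ADDR_VALID  0x40
#define WOL_ADDR_VALID   0x80
#define MC_MAG_EN_VALID 0x100  /* new bit, one past the old 0xF0 mask */
#define ADDR_VALID_MASK 0x1F0  /* widened to cover it */

int main(void)
{
        unsigned flags = WOL_ADDR_VALID | MC_MAG_EN_VALID;

        /* The old 0xF0 mask would have silently dropped the new bit. */
        printf("old mask keeps 0x%03x\n", flags & 0xF0);
        printf("new mask keeps 0x%03x\n", flags & ADDR_VALID_MASK);
        return 0;
}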
index 39fcb1dc4ea64d80b3601a3d62ff1052b22907c6..56c7e751149b0cba324c2722178705c42e4c908f 100644 (file)
@@ -71,6 +71,212 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
        return status;
 }
 
+/**
+ * i40evf_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+       switch (aq_err) {
+       case I40E_AQ_RC_OK:
+               return "OK";
+       case I40E_AQ_RC_EPERM:
+               return "I40E_AQ_RC_EPERM";
+       case I40E_AQ_RC_ENOENT:
+               return "I40E_AQ_RC_ENOENT";
+       case I40E_AQ_RC_ESRCH:
+               return "I40E_AQ_RC_ESRCH";
+       case I40E_AQ_RC_EINTR:
+               return "I40E_AQ_RC_EINTR";
+       case I40E_AQ_RC_EIO:
+               return "I40E_AQ_RC_EIO";
+       case I40E_AQ_RC_ENXIO:
+               return "I40E_AQ_RC_ENXIO";
+       case I40E_AQ_RC_E2BIG:
+               return "I40E_AQ_RC_E2BIG";
+       case I40E_AQ_RC_EAGAIN:
+               return "I40E_AQ_RC_EAGAIN";
+       case I40E_AQ_RC_ENOMEM:
+               return "I40E_AQ_RC_ENOMEM";
+       case I40E_AQ_RC_EACCES:
+               return "I40E_AQ_RC_EACCES";
+       case I40E_AQ_RC_EFAULT:
+               return "I40E_AQ_RC_EFAULT";
+       case I40E_AQ_RC_EBUSY:
+               return "I40E_AQ_RC_EBUSY";
+       case I40E_AQ_RC_EEXIST:
+               return "I40E_AQ_RC_EEXIST";
+       case I40E_AQ_RC_EINVAL:
+               return "I40E_AQ_RC_EINVAL";
+       case I40E_AQ_RC_ENOTTY:
+               return "I40E_AQ_RC_ENOTTY";
+       case I40E_AQ_RC_ENOSPC:
+               return "I40E_AQ_RC_ENOSPC";
+       case I40E_AQ_RC_ENOSYS:
+               return "I40E_AQ_RC_ENOSYS";
+       case I40E_AQ_RC_ERANGE:
+               return "I40E_AQ_RC_ERANGE";
+       case I40E_AQ_RC_EFLUSHED:
+               return "I40E_AQ_RC_EFLUSHED";
+       case I40E_AQ_RC_BAD_ADDR:
+               return "I40E_AQ_RC_BAD_ADDR";
+       case I40E_AQ_RC_EMODE:
+               return "I40E_AQ_RC_EMODE";
+       case I40E_AQ_RC_EFBIG:
+               return "I40E_AQ_RC_EFBIG";
+       }
+
+       snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+       return hw->err_str;
+}
+
+/**
+ * i40evf_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+{
+       switch (stat_err) {
+       case 0:
+               return "OK";
+       case I40E_ERR_NVM:
+               return "I40E_ERR_NVM";
+       case I40E_ERR_NVM_CHECKSUM:
+               return "I40E_ERR_NVM_CHECKSUM";
+       case I40E_ERR_PHY:
+               return "I40E_ERR_PHY";
+       case I40E_ERR_CONFIG:
+               return "I40E_ERR_CONFIG";
+       case I40E_ERR_PARAM:
+               return "I40E_ERR_PARAM";
+       case I40E_ERR_MAC_TYPE:
+               return "I40E_ERR_MAC_TYPE";
+       case I40E_ERR_UNKNOWN_PHY:
+               return "I40E_ERR_UNKNOWN_PHY";
+       case I40E_ERR_LINK_SETUP:
+               return "I40E_ERR_LINK_SETUP";
+       case I40E_ERR_ADAPTER_STOPPED:
+               return "I40E_ERR_ADAPTER_STOPPED";
+       case I40E_ERR_INVALID_MAC_ADDR:
+               return "I40E_ERR_INVALID_MAC_ADDR";
+       case I40E_ERR_DEVICE_NOT_SUPPORTED:
+               return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+       case I40E_ERR_MASTER_REQUESTS_PENDING:
+               return "I40E_ERR_MASTER_REQUESTS_PENDING";
+       case I40E_ERR_INVALID_LINK_SETTINGS:
+               return "I40E_ERR_INVALID_LINK_SETTINGS";
+       case I40E_ERR_AUTONEG_NOT_COMPLETE:
+               return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+       case I40E_ERR_RESET_FAILED:
+               return "I40E_ERR_RESET_FAILED";
+       case I40E_ERR_SWFW_SYNC:
+               return "I40E_ERR_SWFW_SYNC";
+       case I40E_ERR_NO_AVAILABLE_VSI:
+               return "I40E_ERR_NO_AVAILABLE_VSI";
+       case I40E_ERR_NO_MEMORY:
+               return "I40E_ERR_NO_MEMORY";
+       case I40E_ERR_BAD_PTR:
+               return "I40E_ERR_BAD_PTR";
+       case I40E_ERR_RING_FULL:
+               return "I40E_ERR_RING_FULL";
+       case I40E_ERR_INVALID_PD_ID:
+               return "I40E_ERR_INVALID_PD_ID";
+       case I40E_ERR_INVALID_QP_ID:
+               return "I40E_ERR_INVALID_QP_ID";
+       case I40E_ERR_INVALID_CQ_ID:
+               return "I40E_ERR_INVALID_CQ_ID";
+       case I40E_ERR_INVALID_CEQ_ID:
+               return "I40E_ERR_INVALID_CEQ_ID";
+       case I40E_ERR_INVALID_AEQ_ID:
+               return "I40E_ERR_INVALID_AEQ_ID";
+       case I40E_ERR_INVALID_SIZE:
+               return "I40E_ERR_INVALID_SIZE";
+       case I40E_ERR_INVALID_ARP_INDEX:
+               return "I40E_ERR_INVALID_ARP_INDEX";
+       case I40E_ERR_INVALID_FPM_FUNC_ID:
+               return "I40E_ERR_INVALID_FPM_FUNC_ID";
+       case I40E_ERR_QP_INVALID_MSG_SIZE:
+               return "I40E_ERR_QP_INVALID_MSG_SIZE";
+       case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+               return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+       case I40E_ERR_INVALID_FRAG_COUNT:
+               return "I40E_ERR_INVALID_FRAG_COUNT";
+       case I40E_ERR_QUEUE_EMPTY:
+               return "I40E_ERR_QUEUE_EMPTY";
+       case I40E_ERR_INVALID_ALIGNMENT:
+               return "I40E_ERR_INVALID_ALIGNMENT";
+       case I40E_ERR_FLUSHED_QUEUE:
+               return "I40E_ERR_FLUSHED_QUEUE";
+       case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+               return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+       case I40E_ERR_INVALID_IMM_DATA_SIZE:
+               return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+       case I40E_ERR_TIMEOUT:
+               return "I40E_ERR_TIMEOUT";
+       case I40E_ERR_OPCODE_MISMATCH:
+               return "I40E_ERR_OPCODE_MISMATCH";
+       case I40E_ERR_CQP_COMPL_ERROR:
+               return "I40E_ERR_CQP_COMPL_ERROR";
+       case I40E_ERR_INVALID_VF_ID:
+               return "I40E_ERR_INVALID_VF_ID";
+       case I40E_ERR_INVALID_HMCFN_ID:
+               return "I40E_ERR_INVALID_HMCFN_ID";
+       case I40E_ERR_BACKING_PAGE_ERROR:
+               return "I40E_ERR_BACKING_PAGE_ERROR";
+       case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+               return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+       case I40E_ERR_INVALID_PBLE_INDEX:
+               return "I40E_ERR_INVALID_PBLE_INDEX";
+       case I40E_ERR_INVALID_SD_INDEX:
+               return "I40E_ERR_INVALID_SD_INDEX";
+       case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+               return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+       case I40E_ERR_INVALID_SD_TYPE:
+               return "I40E_ERR_INVALID_SD_TYPE";
+       case I40E_ERR_MEMCPY_FAILED:
+               return "I40E_ERR_MEMCPY_FAILED";
+       case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+               return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+       case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+               return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+       case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+               return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+       case I40E_ERR_SRQ_ENABLED:
+               return "I40E_ERR_SRQ_ENABLED";
+       case I40E_ERR_ADMIN_QUEUE_ERROR:
+               return "I40E_ERR_ADMIN_QUEUE_ERROR";
+       case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+               return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+       case I40E_ERR_BUF_TOO_SHORT:
+               return "I40E_ERR_BUF_TOO_SHORT";
+       case I40E_ERR_ADMIN_QUEUE_FULL:
+               return "I40E_ERR_ADMIN_QUEUE_FULL";
+       case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+               return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+       case I40E_ERR_BAD_IWARP_CQE:
+               return "I40E_ERR_BAD_IWARP_CQE";
+       case I40E_ERR_NVM_BLANK_MODE:
+               return "I40E_ERR_NVM_BLANK_MODE";
+       case I40E_ERR_NOT_IMPLEMENTED:
+               return "I40E_ERR_NOT_IMPLEMENTED";
+       case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+               return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+       case I40E_ERR_DIAG_TEST_FAILED:
+               return "I40E_ERR_DIAG_TEST_FAILED";
+       case I40E_ERR_NOT_READY:
+               return "I40E_ERR_NOT_READY";
+       case I40E_NOT_SUPPORTED:
+               return "I40E_NOT_SUPPORTED";
+       case I40E_ERR_FIRMWARE_API_VERSION:
+               return "I40E_ERR_FIRMWARE_API_VERSION";
+       }
+
+       snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+       return hw->err_str;
+}
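Worth noting about this fallback: for unknown codes the returned pointer aliases the per-hw err_str scratch buffer, so it is only valid until the next conversion on the same hw. That is fine for the immediate log-message uses in this series, but not for holding onto. A compilable sketch of the hazard, using a trimmed-down stand-in for the real function:

#include <stdio.h>

struct hw { char err_str[16]; };

/* Same fallback shape as i40evf_stat_str(): unknown codes are printed
 * into a per-hw scratch buffer that is reused on every call. */
static const char *stat_str(struct hw *hw, int err)
{
        if (err == 0)
                return "OK";            /* static string: always valid */
        snprintf(hw->err_str, sizeof(hw->err_str), "%d", err);
        return hw->err_str;             /* scratch: reused next call */
}

int main(void)
{
        struct hw hw;
        const char *a = stat_str(&hw, -5);
        const char *b = stat_str(&hw, -7); /* overwrites the scratch */

        printf("a=%s b=%s\n", a, b);       /* both print "-7" */
        return 0;
}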
+
 /**
  * i40evf_debug_aq
  * @hw: debug mask related to admin queue
index 931c880443003d7aa3a1787ddd1b5f27241236cd..00ed24bfce1347f0b80b077139a754e58c6da376 100644 (file)
@@ -62,6 +62,7 @@ struct i40e_hmc_bp {
 struct i40e_hmc_pd_entry {
        struct i40e_hmc_bp bp;
        u32 sd_index;
+       bool rsrc_pg;
        bool valid;
 };
 
@@ -126,8 +127,8 @@ struct i40e_hmc_info {
                 I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |              \
                ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<            \
                I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |                  \
-               (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);            \
-       val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);      \
+               BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);              \
+       val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);     \
        wr32((hw), I40E_PFHMC_SDDATAHIGH, val1);                        \
        wr32((hw), I40E_PFHMC_SDDATALOW, val2);                         \
        wr32((hw), I40E_PFHMC_SDCMD, val3);                             \
@@ -146,7 +147,7 @@ struct i40e_hmc_info {
                I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |               \
                ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<            \
                I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);                   \
-       val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);      \
+       val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);     \
        wr32((hw), I40E_PFHMC_SDDATAHIGH, 0);                           \
        wr32((hw), I40E_PFHMC_SDDATALOW, val2);                         \
        wr32((hw), I40E_PFHMC_SDCMD, val3);                             \
@@ -218,7 +219,8 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
 
 i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
                                              struct i40e_hmc_info *hmc_info,
-                                             u32 pd_index);
+                                             u32 pd_index,
+                                             struct i40e_dma_mem *rsrc_pg);
 i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
                                        struct i40e_hmc_info *hmc_info,
                                        u32 idx);
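The macro hunks above switch the SDCMD write-command bit to BIT_ULL. If that bit index is 31 (as the PMSDWR name suggests, though the real value is not shown here), a plain 1 << 31 would shift into the sign bit of a 32-bit int, which is undefined behaviour in C; the old 1u << form was already safe, and BIT_ULL keeps it safe while standardizing on the helper. A sketch:

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(nr) (1ULL << (nr))

#define PMSDWR_SHIFT 31   /* illustrative: assume the command bit is bit 31 */

int main(void)
{
        /* 1 << 31 is UB on a 32-bit int; an unsigned or 64-bit one is safe,
         * and the result truncates cleanly back to the u32 register value. */
        uint32_t val3 = 5u | BIT_ULL(PMSDWR_SHIFT);

        printf("val3 = 0x%08x\n", (unsigned)val3);
        return 0;
}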
index 58e37a44b80a10233f00d004ee8fb9f5d496c12e..856eb9d06595eb7eff8ef66df8df0eb137751808 100644 (file)
@@ -60,6 +60,8 @@ void i40e_idle_aq(struct i40e_hw *hw);
 void i40evf_resume_aq(struct i40e_hw *hw);
 bool i40evf_check_asq_alive(struct i40e_hw *hw);
 i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
 
 i40e_status i40e_set_mac_type(struct i40e_hw *hw);
 
index 395f32f226c08ac924e7d3e707ef7124b2744ec5..60f88e4ad065ebdfe609e5e94cdce94e5a410d3c 100644 (file)
@@ -404,7 +404,7 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
         *  20-1249MB/s bulk   (8000 ints/s)
         */
        bytes_per_int = rc->total_bytes / rc->itr;
-       switch (rc->itr) {
+       switch (new_latency_range) {
        case I40E_LOWEST_LATENCY:
                if (bytes_per_int > 10)
                        new_latency_range = I40E_LOW_LATENCY;
@@ -417,9 +417,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
                break;
        case I40E_BULK_LATENCY:
                if (bytes_per_int <= 20)
-                       rc->latency_range = I40E_LOW_LATENCY;
+                       new_latency_range = I40E_LOW_LATENCY;
+               break;
+       default:
+               if (bytes_per_int <= 20)
+                       new_latency_range = I40E_LOW_LATENCY;
                break;
        }
+       rc->latency_range = new_latency_range;
 
        switch (new_latency_range) {
        case I40E_LOWEST_LATENCY:
@@ -435,42 +440,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
                break;
        }
 
-       if (new_itr != rc->itr) {
-               /* do an exponential smoothing */
-               new_itr = (10 * new_itr * rc->itr) /
-                         ((9 * new_itr) + rc->itr);
-               rc->itr = new_itr & I40E_MAX_ITR;
-       }
+       if (new_itr != rc->itr)
+               rc->itr = new_itr;
 
        rc->total_bytes = 0;
        rc->total_packets = 0;
 }
 
-/**
- * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
- * @q_vector: the vector to adjust
- **/
-static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
-{
-       u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
-       struct i40e_hw *hw = &q_vector->vsi->back->hw;
-       u32 reg_addr;
-       u16 old_itr;
-
-       reg_addr = I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1);
-       old_itr = q_vector->rx.itr;
-       i40e_set_new_dynamic_itr(&q_vector->rx);
-       if (old_itr != q_vector->rx.itr)
-               wr32(hw, reg_addr, q_vector->rx.itr);
-
-       reg_addr = I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1);
-       old_itr = q_vector->tx.itr;
-       i40e_set_new_dynamic_itr(&q_vector->tx);
-       if (old_itr != q_vector->tx.itr)
-               wr32(hw, reg_addr, q_vector->tx.itr);
-}
-
-/**
+/**
  * i40evf_setup_tx_descriptors - Allocate the Tx descriptors
  * @tx_ring: the tx ring to set up
  *
@@ -873,7 +850,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                return;
 
        /* did the hardware decode the packet and checksum? */
-       if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+       if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
                return;
 
        /* both known and outer_ip must be set for the below code to work */
@@ -888,25 +865,25 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                ipv6 = true;
 
        if (ipv4 &&
-           (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
-                        (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+           (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+                        BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
                goto checksum_fail;
 
        /* likely incorrect csum if alternate IP extension headers found */
        if (ipv6 &&
-           rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+           rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
                /* don't increment checksum err here, non-fatal err */
                return;
 
        /* there was some L4 error, count error and punt packet to the stack */
-       if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+       if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
                goto checksum_fail;
 
        /* handle packets that were not able to be checksummed due
         * to arrival speed, in this case the stack can compute
         * the csum.
         */
-       if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
+       if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
                return;
 
        /* If VXLAN traffic has an outer UDPv4 checksum we need to check
@@ -1027,7 +1004,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
                        I40E_RXD_QW1_STATUS_SHIFT;
 
-               if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+               if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
                        break;
 
                /* This memory barrier is needed to keep us from reading
@@ -1063,8 +1040,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 
                rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
                           I40E_RXD_QW1_ERROR_SHIFT;
-               rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
-               rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+               rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+               rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
                rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
                           I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1116,7 +1093,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                I40E_RX_INCREMENT(rx_ring, i);
 
                if (unlikely(
-                   !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+                   !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
                        struct i40e_rx_buffer *next_buffer;
 
                        next_buffer = &rx_ring->rx_bi[i];
@@ -1126,7 +1103,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                }
 
                /* ERR_MASK will only have valid bits if EOP set */
-               if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+               if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
                        dev_kfree_skb_any(skb);
                        continue;
                }
@@ -1141,7 +1118,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 
                i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-               vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+               vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
                         ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
                         : 0;
 #ifdef I40E_FCOE
@@ -1202,7 +1179,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
                rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
                        I40E_RXD_QW1_STATUS_SHIFT;
 
-               if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+               if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
                        break;
 
                /* This memory barrier is needed to keep us from reading
@@ -1220,7 +1197,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 
                rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
                           I40E_RXD_QW1_ERROR_SHIFT;
-               rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+               rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
                rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
                           I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1238,13 +1215,13 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
                I40E_RX_INCREMENT(rx_ring, i);
 
                if (unlikely(
-                   !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+                   !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
                        rx_ring->rx_stats.non_eop_descs++;
                        continue;
                }
 
                /* ERR_MASK will only have valid bits if EOP set */
-               if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+               if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
                        dev_kfree_skb_any(skb);
                        /* TODO: shouldn't we increment a counter indicating the
                         * drop?
@@ -1262,7 +1239,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 
                i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-               vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+               vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
                         ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
                         : 0;
                i40e_receive_skb(rx_ring, skb, vlan_tag);
@@ -1280,6 +1257,67 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
        return total_rx_packets;
 }
 
+/**
+ * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * @vsi: the VSI we care about
+ * @q_vector: q_vector for which itr is being updated and interrupt enabled
+ **/
+static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+                                         struct i40e_q_vector *q_vector)
+{
+       struct i40e_hw *hw = &vsi->back->hw;
+       u16 old_itr;
+       int vector;
+       u32 val;
+
+       vector = (q_vector->v_idx + vsi->base_vector);
+       if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+               old_itr = q_vector->rx.itr;
+               i40e_set_new_dynamic_itr(&q_vector->rx);
+               if (old_itr != q_vector->rx.itr) {
+                       val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
+                             I40E_VFINT_DYN_CTLN_CLEARPBA_MASK |
+                             (I40E_RX_ITR <<
+                              I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+                             (q_vector->rx.itr <<
+                              I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT);
+               } else {
+                       val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
+                             I40E_VFINT_DYN_CTLN_CLEARPBA_MASK |
+                             (I40E_ITR_NONE <<
+                              I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT);
+               }
+               if (!test_bit(__I40E_DOWN, &vsi->state))
+                       wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
+       } else {
+               i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx));
+       }
+       if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+               old_itr = q_vector->tx.itr;
+               i40e_set_new_dynamic_itr(&q_vector->tx);
+               if (old_itr != q_vector->tx.itr) {
+                       val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
+                               I40E_VFINT_DYN_CTLN_CLEARPBA_MASK |
+                               (I40E_TX_ITR <<
+                                  I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+                               (q_vector->tx.itr <<
+                                  I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT);
+               } else {
+                       val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
+                               I40E_VFINT_DYN_CTLN_CLEARPBA_MASK |
+                               (I40E_ITR_NONE <<
+                                  I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT);
+               }
+               if (!test_bit(__I40E_DOWN, &vsi->state))
+                       wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
+       } else {
+               i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx));
+       }
+}
+
 /**
  * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
  * @napi: napi struct with our devices info in it
@@ -1336,13 +1374,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
 
        /* Work is done so exit the polling mode and re-enable the interrupt */
        napi_complete(napi);
-       if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
-           ITR_IS_DYNAMIC(vsi->tx_itr_setting))
-               i40e_update_dynamic_itr(q_vector);
-
-       if (!test_bit(__I40E_DOWN, &vsi->state))
-               i40evf_irq_enable_queues(vsi->back, 1 << q_vector->v_idx);
-
+       i40e_update_enable_itr(vsi, q_vector);
        return 0;
 }
 
@@ -1841,6 +1873,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
            netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
                                                   tx_ring->queue_index)))
                writel(i, tx_ring->tail);
+       else
+               prefetchw(tx_desc + 1);
 
        return;
 
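The new i40e_update_enable_itr() above folds ITR reprogramming and interrupt
re-enable into a single register write per traffic direction. A minimal
userspace sketch of how such a DYN_CTLN value is composed; the field shifts,
mask values, and the ITR_NONE index here are illustrative stand-ins, not the
real register layout from i40e_register.h:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the I40E_VFINT_DYN_CTLN_* fields; the real
 * shifts and masks live in i40e_register.h and may differ. */
#define INTENA_MASK	(1u << 0)	/* re-enable the interrupt */
#define CLEARPBA_MASK	(1u << 1)	/* clear the pending-bit array */
#define ITR_INDX_SHIFT	3		/* which ITR timer to program */
#define INTERVAL_SHIFT	5		/* the new interval value */
#define ITR_NONE	3		/* "no ITR" index, by assumption */

static uint32_t build_dyn_ctln(unsigned int itr_idx, uint16_t new_itr,
			       int itr_changed)
{
	uint32_t val = INTENA_MASK | CLEARPBA_MASK;

	if (itr_changed)	/* program the new interval for this index */
		val |= (itr_idx << ITR_INDX_SHIFT) |
		       ((uint32_t)new_itr << INTERVAL_SHIFT);
	else			/* unchanged: select the no-ITR index */
		val |= ITR_NONE << ITR_INDX_SHIFT;
	return val;
}

int main(void)
{
	printf("0x%08x\n", (unsigned)build_dyn_ctln(0 /* RX */, 0x7a, 1));
	return 0;
}
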
index e7a34f899f2cbb8150495a31e0690a95e90efc1a..6b47c818d1f08c11b81fd5e21b7ba9b162bdadd9 100644 (file)
@@ -66,17 +66,17 @@ enum i40e_dyn_idx_t {
 
 /* Supported RSS offloads */
 #define I40E_DEFAULT_RSS_HENA ( \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
 
 /* Supported Rx Buffer Sizes */
 #define I40E_RXBUFFER_512   512    /* Used for packet split */
@@ -129,16 +129,16 @@ enum i40e_dyn_idx_t {
 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 #define I40E_MIN_DESC_PENDING  4
 
-#define I40E_TX_FLAGS_CSUM             (u32)(1)
-#define I40E_TX_FLAGS_HW_VLAN          (u32)(1 << 1)
-#define I40E_TX_FLAGS_SW_VLAN          (u32)(1 << 2)
-#define I40E_TX_FLAGS_TSO              (u32)(1 << 3)
-#define I40E_TX_FLAGS_IPV4             (u32)(1 << 4)
-#define I40E_TX_FLAGS_IPV6             (u32)(1 << 5)
-#define I40E_TX_FLAGS_FCCRC            (u32)(1 << 6)
-#define I40E_TX_FLAGS_FSO              (u32)(1 << 7)
-#define I40E_TX_FLAGS_FD_SB            (u32)(1 << 9)
-#define I40E_TX_FLAGS_VXLAN_TUNNEL     (u32)(1 << 10)
+#define I40E_TX_FLAGS_CSUM             BIT(0)
+#define I40E_TX_FLAGS_HW_VLAN          BIT(1)
+#define I40E_TX_FLAGS_SW_VLAN          BIT(2)
+#define I40E_TX_FLAGS_TSO              BIT(3)
+#define I40E_TX_FLAGS_IPV4             BIT(4)
+#define I40E_TX_FLAGS_IPV6             BIT(5)
+#define I40E_TX_FLAGS_FCCRC            BIT(6)
+#define I40E_TX_FLAGS_FSO              BIT(7)
+#define I40E_TX_FLAGS_FD_SB            BIT(9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL     BIT(10)
 #define I40E_TX_FLAGS_VLAN_MASK                0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT  29
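The conversions above replace open-coded shifts with the kernel's BIT() and
BIT_ULL() helpers. A self-contained sketch of equivalent definitions (the
real ones live in include/linux/bitops.h) shows why the 64-bit HENA and
descriptor masks need BIT_ULL():

#include <assert.h>

/* Userspace equivalents of the kernel helpers from include/linux/bitops.h. */
#define BIT(nr)		(1UL << (nr))
#define BIT_ULL(nr)	(1ULL << (nr))

int main(void)
{
	/* BIT() is unsigned long, so high bit numbers are only safe on
	 * 64-bit hosts; BIT_ULL() is 64-bit everywhere, which is why the
	 * u64 HENA mask above uses BIT_ULL(). */
	assert(BIT(10) == 0x400UL);
	assert(BIT_ULL(33) == 0x200000000ULL);
	return 0;
}
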
index c463ec41579c708ffbe606ea0b31ca0485ea4c58..4ba9a012dcbac1bdbc87366df00f9d85c4e207f0 100644 (file)
@@ -213,7 +213,17 @@ struct i40e_hw_capabilities {
        bool dcb;
        bool fcoe;
        bool iscsi; /* Indicates iSCSI enabled */
-       bool mfp_mode_1;
+       bool flex10_enable;
+       bool flex10_capable;
+       u32  flex10_mode;
+#define I40E_FLEX10_MODE_UNKNOWN       0x0
+#define I40E_FLEX10_MODE_DCC           0x1
+#define I40E_FLEX10_MODE_DCI           0x2
+
+       u32 flex10_status;
+#define I40E_FLEX10_STATUS_DCC_ERROR   0x1
+#define I40E_FLEX10_STATUS_VC_MODE     0x2
+
        bool mgmt_cem;
        bool ieee_1588;
        bool iwarp;
@@ -481,6 +491,7 @@ struct i40e_hw {
 
        /* debug mask */
        u32 debug_mask;
+       char err_str[16];
 };
 
 static inline bool i40e_is_vf(struct i40e_hw *hw)
@@ -594,7 +605,7 @@ enum i40e_rx_desc_status_bits {
 };
 
 #define I40E_RXD_QW1_STATUS_SHIFT      0
-#define I40E_RXD_QW1_STATUS_MASK       (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+#define I40E_RXD_QW1_STATUS_MASK       ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
                                         << I40E_RXD_QW1_STATUS_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT   I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
@@ -602,8 +613,8 @@ enum i40e_rx_desc_status_bits {
                                             I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT  I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK     (0x1UL << \
-                                        I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
+                                   BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
 
 enum i40e_rx_desc_fltstat_values {
        I40E_RX_DESC_FLTSTAT_NO_DATA    = 0,
@@ -737,8 +748,7 @@ enum i40e_rx_ptype_payload_layer {
                                         I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
 
 #define I40E_RXD_QW1_LENGTH_SPH_SHIFT  63
-#define I40E_RXD_QW1_LENGTH_SPH_MASK   (0x1ULL << \
-                                        I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+#define I40E_RXD_QW1_LENGTH_SPH_MASK   BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
 
 enum i40e_rx_desc_ext_status_bits {
        /* Note: These are predefined bit offsets */
@@ -914,12 +924,12 @@ enum i40e_tx_ctx_desc_eipt_offload {
 #define I40E_TXD_CTX_QW0_NATT_SHIFT    9
 #define I40E_TXD_CTX_QW0_NATT_MASK     (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
-#define I40E_TXD_CTX_UDP_TUNNELING     (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_UDP_TUNNELING     BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
 #define I40E_TXD_CTX_GRE_TUNNELING     (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
 #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT       11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK        (0x1ULL << \
-                                        I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
+                                      BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
 
 #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST      I40E_TXD_CTX_QW0_EIP_NOINC_MASK
 
@@ -984,8 +994,8 @@ enum i40e_filter_program_desc_fd_status {
 };
 
 #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT       23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK        (0x1FFUL << \
-                                        I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
+                                      BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CMD_SHIFT    4
 #define I40E_TXD_FLTR_QW1_CMD_MASK     (0xFFFFULL << \
@@ -1003,8 +1013,7 @@ enum i40e_filter_program_desc_pcmd {
 #define I40E_TXD_FLTR_QW1_DEST_MASK    (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT        (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
-                                        I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT      (0x9ULL + \
                                                 I40E_TXD_FLTR_QW1_CMD_SHIFT)
@@ -1109,6 +1118,8 @@ struct i40e_hw_port_stats {
        u64 fd_atr_match;
        u64 fd_sb_match;
        u64 fd_atr_tunnel_match;
+       u32 fd_atr_status;
+       u32 fd_sb_status;
        /* EEE LPI */
        u32 tx_lpi_status;
        u32 rx_lpi_status;
index 59f62f0e65dd3ecaf230d2aa433452cc68bfa8d2..1e89dea0d52925e0c18bab22da4266594da02d36 100644 (file)
@@ -110,7 +110,9 @@ struct i40e_virtchnl_msg {
  * error regardless of version mismatch.
  */
 #define I40E_VIRTCHNL_VERSION_MAJOR            1
-#define I40E_VIRTCHNL_VERSION_MINOR            0
+#define I40E_VIRTCHNL_VERSION_MINOR            1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
 struct i40e_virtchnl_version_info {
        u32 major;
        u32 minor;
@@ -129,7 +131,8 @@ struct i40e_virtchnl_version_info {
  */
 
 /* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * VF sends this request to PF with no parameters
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
  * PF responds with an indirect message containing
  * i40e_virtchnl_vf_resource and one or more
  * i40e_virtchnl_vsi_resource structures.
@@ -143,9 +146,12 @@ struct i40e_virtchnl_vsi_resource {
        u8 default_mac_addr[ETH_ALEN];
 };
 /* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2    0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE  0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN  0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2            0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP         0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE          0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ                0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG       0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN          0x00010000
 
 struct i40e_virtchnl_vf_resource {
        u16 num_vsis;
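Per the GET_VF_RESOURCES comment above, a version 1.1 VF attaches a u32
capability bitmap to the request while a 1.0 VF sends no payload. A hedged
sketch of that decision, with a simplified send callback standing in for the
admin-queue message path (flag values are taken from the list above):

#include <stddef.h>
#include <stdint.h>

struct version_info { uint32_t major, minor; };

/* Offload bits from the VF offload flag list above. */
#define OFFLOAD_L2	0x00000001
#define OFFLOAD_RSS_AQ	0x00000008
#define OFFLOAD_RSS_REG	0x00000010
#define OFFLOAD_VLAN	0x00010000

/* Simplified model: a 1.1 PF accepts a u32 capability bitmap with the
 * resource request; a 1.0 PF expects no payload. */
static int send_resource_request(const struct version_info *pf,
				 int (*send)(const void *buf, size_t len))
{
	uint32_t caps = OFFLOAD_L2 | OFFLOAD_RSS_AQ |
			OFFLOAD_RSS_REG | OFFLOAD_VLAN;

	if (pf->major == 1 && pf->minor == 1)
		return send(&caps, sizeof(caps));
	return send(NULL, 0);	/* legacy 1.0 PF: no parameters */
}

static int fake_send(const void *buf, size_t len)
{
	(void)buf;
	return (int)len;
}

int main(void)
{
	struct version_info pf = { 1, 1 };

	return send_resource_request(&pf, fake_send) == 4 ? 0 : 1;
}
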
index fea3b75a9a35fcdc58b9d5f5d0f6125dbf62e0cf..c33c7cce52fe2c5decf79e514a342275cb7f2f3a 100644 (file)
@@ -207,17 +207,17 @@ struct i40evf_adapter {
        struct msix_entry *msix_entries;
 
        u32 flags;
-#define I40EVF_FLAG_RX_CSUM_ENABLED              (u32)(1)
-#define I40EVF_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 1)
-#define I40EVF_FLAG_RX_PS_CAPABLE                (u32)(1 << 2)
-#define I40EVF_FLAG_RX_PS_ENABLED                (u32)(1 << 3)
-#define I40EVF_FLAG_IN_NETPOLL                   (u32)(1 << 4)
-#define I40EVF_FLAG_IMIR_ENABLED                 (u32)(1 << 5)
-#define I40EVF_FLAG_MQ_CAPABLE                   (u32)(1 << 6)
-#define I40EVF_FLAG_NEED_LINK_UPDATE             (u32)(1 << 7)
-#define I40EVF_FLAG_PF_COMMS_FAILED              (u32)(1 << 8)
-#define I40EVF_FLAG_RESET_PENDING                (u32)(1 << 9)
-#define I40EVF_FLAG_RESET_NEEDED                 (u32)(1 << 10)
+#define I40EVF_FLAG_RX_CSUM_ENABLED              BIT(0)
+#define I40EVF_FLAG_RX_1BUF_CAPABLE              BIT(1)
+#define I40EVF_FLAG_RX_PS_CAPABLE                BIT(2)
+#define I40EVF_FLAG_RX_PS_ENABLED                BIT(3)
+#define I40EVF_FLAG_IN_NETPOLL                   BIT(4)
+#define I40EVF_FLAG_IMIR_ENABLED                 BIT(5)
+#define I40EVF_FLAG_MQ_CAPABLE                   BIT(6)
+#define I40EVF_FLAG_NEED_LINK_UPDATE             BIT(7)
+#define I40EVF_FLAG_PF_COMMS_FAILED              BIT(8)
+#define I40EVF_FLAG_RESET_PENDING                BIT(9)
+#define I40EVF_FLAG_RESET_NEEDED                 BIT(10)
 /* duplicates for common code */
 #define I40E_FLAG_FDIR_ATR_ENABLED              0
 #define I40E_FLAG_DCB_ENABLED                   0
@@ -225,15 +225,16 @@ struct i40evf_adapter {
 #define I40E_FLAG_RX_CSUM_ENABLED                I40EVF_FLAG_RX_CSUM_ENABLED
        /* flags for admin queue service task */
        u32 aq_required;
-#define I40EVF_FLAG_AQ_ENABLE_QUEUES           (u32)(1)
-#define I40EVF_FLAG_AQ_DISABLE_QUEUES          (u32)(1 << 1)
-#define I40EVF_FLAG_AQ_ADD_MAC_FILTER          (u32)(1 << 2)
-#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER         (u32)(1 << 3)
-#define I40EVF_FLAG_AQ_DEL_MAC_FILTER          (u32)(1 << 4)
-#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER         (u32)(1 << 5)
-#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES                (u32)(1 << 6)
-#define I40EVF_FLAG_AQ_MAP_VECTORS             (u32)(1 << 7)
-#define I40EVF_FLAG_AQ_HANDLE_RESET            (u32)(1 << 8)
+#define I40EVF_FLAG_AQ_ENABLE_QUEUES           BIT(0)
+#define I40EVF_FLAG_AQ_DISABLE_QUEUES          BIT(1)
+#define I40EVF_FLAG_AQ_ADD_MAC_FILTER          BIT(2)
+#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER         BIT(3)
+#define I40EVF_FLAG_AQ_DEL_MAC_FILTER          BIT(4)
+#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER         BIT(5)
+#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES                BIT(6)
+#define I40EVF_FLAG_AQ_MAP_VECTORS             BIT(7)
+#define I40EVF_FLAG_AQ_HANDLE_RESET            BIT(8)
+#define I40EVF_FLAG_AQ_GET_CONFIG              BIT(10)
 
        /* OS defined structs */
        struct net_device *netdev;
@@ -249,8 +250,17 @@ struct i40evf_adapter {
        bool netdev_registered;
        bool link_up;
        enum i40e_virtchnl_ops current_op;
+#define CLIENT_ENABLED(_a) ((_a)->vf_res->vf_offload_flags & \
+                           I40E_VIRTCHNL_VF_OFFLOAD_IWARP)
+#define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \
+                   I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \
+                         I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
        struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
        struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
+       struct i40e_virtchnl_version_info pf_version;
+#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
+                      ((_a)->pf_version.minor == 1))
        u16 msg_enable;
        struct i40e_eth_stats current_stats;
        struct i40e_vsi vsi;
@@ -264,6 +274,7 @@ extern const char i40evf_driver_version[];
 
 int i40evf_up(struct i40evf_adapter *adapter);
 void i40evf_down(struct i40evf_adapter *adapter);
+int i40evf_process_config(struct i40evf_adapter *adapter);
 void i40evf_reset(struct i40evf_adapter *adapter);
 void i40evf_set_ethtool_ops(struct net_device *netdev);
 void i40evf_update_stats(struct i40evf_adapter *adapter);
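The feature-test macros added above (CLIENT_ENABLED, RSS_AQ, VLAN_ALLOWED,
PF_IS_V11) gate code paths on what the PF reported. A compact model of that
pattern, with the adapter layout abbreviated to the fields the macros touch
and the flag value mirroring I40E_VIRTCHNL_VF_OFFLOAD_VLAN:

#include <stdint.h>

struct vf_resource { uint32_t vf_offload_flags; };
struct adapter {
	struct vf_resource *vf_res;
	struct { uint32_t major, minor; } pf_version;
};

#define OFFLOAD_VLAN		0x00010000
#define VLAN_ALLOWED(a)		((a)->vf_res->vf_offload_flags & OFFLOAD_VLAN)
#define PF_IS_V11(a)		((a)->pf_version.major == 1 && \
				 (a)->pf_version.minor == 1)

static int can_add_vlan_filter(const struct adapter *a)
{
	/* Only a 1.1 PF reports offload flags, so gate on the version. */
	return PF_IS_V11(a) && VLAN_ALLOWED(a);
}

int main(void)
{
	struct vf_resource res = { OFFLOAD_VLAN };
	struct adapter a = { &res, { 1, 1 } };

	return can_add_vlan_filter(&a) ? 0 : 1;
}
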
index 2b53c870e7f113ca0695afab3636446e1015e4e8..4790437a50ac0d3e7f94b2733acf8e7c50c3f18e 100644 (file)
@@ -381,11 +381,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
 
        switch (cmd->flow_type) {
        case TCP_V4_FLOW:
-               if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
+               if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;
        case UDP_V4_FLOW:
-               if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
+               if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;
 
@@ -397,11 +397,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
                break;
 
        case TCP_V6_FLOW:
-               if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
+               if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;
        case UDP_V6_FLOW:
-               if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
+               if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;
 
@@ -479,10 +479,10 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
        case TCP_V4_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+                       hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+                       hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
                        break;
                default:
                        return -EINVAL;
@@ -491,10 +491,10 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
        case TCP_V6_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+                       hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+                       hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
                        break;
                default:
                        return -EINVAL;
@@ -503,12 +503,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
        case UDP_V4_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-                                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+                       hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-                                ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+                       hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+                                BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
                        break;
                default:
                        return -EINVAL;
@@ -517,12 +517,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
        case UDP_V6_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-                                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+                       hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-                                ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+                       hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+                                BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
                        break;
                default:
                        return -EINVAL;
@@ -535,7 +535,7 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
                if ((nfc->data & RXH_L4_B_0_1) ||
                    (nfc->data & RXH_L4_B_2_3))
                        return -EINVAL;
-               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+               hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
                break;
        case AH_ESP_V6_FLOW:
        case AH_V6_FLOW:
@@ -544,15 +544,15 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
                if ((nfc->data & RXH_L4_B_0_1) ||
                    (nfc->data & RXH_L4_B_2_3))
                        return -EINVAL;
-               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+               hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
                break;
        case IPV4_FLOW:
-               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
-                       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+               hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+                        BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
                break;
        case IPV6_FLOW:
-               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
-                       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+               hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+                        BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
                break;
        default:
                return -EINVAL;
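The ethtool handlers above toggle per-flow-type bits in the 64-bit
hashing-enable (HENA) word. A compact model of the UDP_V4_FLOW set/clear
pattern; the PCTYPE bit positions here are illustrative placeholders, not
the real values from the i40e filter PCTYPE enum:

#include <stdint.h>

#define BIT_ULL(nr)	(1ULL << (nr))

/* Illustrative PCTYPE bit positions. */
enum { PCTYPE_IPV4_UDP = 31, PCTYPE_FRAG_IPV4 = 36 };

/* Enable or disable 4-tuple hashing for UDPv4, mirroring the UDP_V4_FLOW
 * case of i40evf_set_rss_hash_opt() above. */
static uint64_t update_hena_udp4(uint64_t hena, int enable)
{
	uint64_t bits = BIT_ULL(PCTYPE_IPV4_UDP) | BIT_ULL(PCTYPE_FRAG_IPV4);

	return enable ? (hena | bits) : (hena & ~bits);
}

int main(void)
{
	return update_hena_udp4(0, 1) ? 0 : 1;
}
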
index 4ab4ebba07a18e5b1b0539cf0c0b8a7122f6fdc2..1503cad918d88d42a559ecf4af15692751e3ba13 100644 (file)
@@ -34,7 +34,7 @@ char i40evf_driver_name[] = "i40evf";
 static const char i40evf_driver_string[] =
        "Intel(R) XL710/X710 Virtual Function Network Driver";
 
-#define DRV_VERSION "1.2.25"
+#define DRV_VERSION "1.3.2"
 const char i40evf_driver_version[] = DRV_VERSION;
 static const char i40evf_copyright[] =
        "Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -240,7 +240,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
        int i;
 
        for (i = 1; i < adapter->num_msix_vectors; i++) {
-               if (mask & (1 << (i - 1))) {
+               if (mask & BIT(i - 1)) {
                        wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
                             I40E_VFINT_DYN_CTLN1_INTENA_MASK |
                             I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
@@ -268,7 +268,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
                wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
        }
        for (i = 1; i < adapter->num_msix_vectors; i++) {
-               if (mask & (1 << i)) {
+               if (mask & BIT(i)) {
                        dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
                        dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
                                   I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
@@ -377,7 +377,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
        q_vector->tx.count++;
        q_vector->tx.latency_range = I40E_LOW_LATENCY;
        q_vector->num_ringpairs++;
-       q_vector->ring_mask |= (1 << t_idx);
+       q_vector->ring_mask |= BIT(t_idx);
 }
 
 /**
@@ -406,7 +406,7 @@ static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
        /* The ideal configuration...
         * We have enough vectors to map one per queue.
         */
-       if (q_vectors == (rxr_remaining * 2)) {
+       if (q_vectors >= (rxr_remaining * 2)) {
                for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
                        i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx);
 
@@ -892,8 +892,10 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
                                        break;
                                }
                        }
+                       if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
+                               found = true;
                }
-               if (found) {
+               if (!found) {
                        f->remove = true;
                        adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
                }
@@ -1369,6 +1371,10 @@ static void i40evf_watchdog_task(struct work_struct *work)
                }
                goto watchdog_done;
        }
+       if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
+               i40evf_send_vf_config_msg(adapter);
+               goto watchdog_done;
+       }
 
        if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
                i40evf_disable_queues(adapter);
@@ -1604,7 +1610,8 @@ continue_reset:
                dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
                         err);
 
-       i40evf_map_queues(adapter);
+       adapter->aq_required = I40EVF_FLAG_AQ_GET_CONFIG;
+       adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
 
        /* re-add all MAC filters */
        list_for_each_entry(f, &adapter->mac_filter_list, list) {
@@ -1614,7 +1621,7 @@ continue_reset:
        list_for_each_entry(f, &adapter->vlan_filter_list, list) {
                f->add = true;
        }
-       adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+       adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
        adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
        clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
        i40evf_misc_irq_enable(adapter);
@@ -1856,6 +1863,7 @@ static int i40evf_open(struct net_device *netdev)
        if (err)
                goto err_req_irq;
 
+       i40evf_add_filter(adapter, adapter->hw.mac.addr);
        i40evf_configure(adapter);
 
        err = i40evf_up_complete(adapter);
@@ -1978,6 +1986,62 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
        return -EBUSY;
 }
 
+/**
+ * i40evf_process_config - Process the config information we got from the PF
+ * @adapter: board private structure
+ *
+ * Verify that we have a valid config struct, and set up our netdev features
+ * and our VSI struct.
+ **/
+int i40evf_process_config(struct i40evf_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+       int i;
+
+       /* got VF config message back from PF, now we can parse it */
+       for (i = 0; i < adapter->vf_res->num_vsis; i++) {
+               if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+                       adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+       }
+       if (!adapter->vsi_res) {
+               dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
+               return -ENODEV;
+       }
+
+       if (adapter->vf_res->vf_offload_flags
+           & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
+               netdev->vlan_features = netdev->features;
+               netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+                                   NETIF_F_HW_VLAN_CTAG_RX |
+                                   NETIF_F_HW_VLAN_CTAG_FILTER;
+       }
+       netdev->features |= NETIF_F_HIGHDMA |
+                           NETIF_F_SG |
+                           NETIF_F_IP_CSUM |
+                           NETIF_F_SCTP_CSUM |
+                           NETIF_F_IPV6_CSUM |
+                           NETIF_F_TSO |
+                           NETIF_F_TSO6 |
+                           NETIF_F_RXCSUM |
+                           NETIF_F_GRO;
+
+       /* copy netdev features into list of user selectable features */
+       netdev->hw_features |= netdev->features;
+       netdev->hw_features &= ~NETIF_F_RXCSUM;
+
+       adapter->vsi.id = adapter->vsi_res->vsi_id;
+
+       adapter->vsi.back = adapter;
+       adapter->vsi.base_vector = 1;
+       adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
+       adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
+                                      ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+       adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
+                                      ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+       adapter->vsi.netdev = adapter->netdev;
+       return 0;
+}
+
 /**
  * i40evf_init_task - worker thread to perform delayed initialization
  * @work: pointer to work_struct containing our data
@@ -1996,10 +2060,9 @@ static void i40evf_init_task(struct work_struct *work)
                                                      struct i40evf_adapter,
                                                      init_task.work);
        struct net_device *netdev = adapter->netdev;
-       struct i40evf_mac_filter *f;
        struct i40e_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
-       int i, err, bufsz;
+       int err, bufsz;
 
        switch (adapter->state) {
        case __I40EVF_STARTUP:
@@ -2050,6 +2113,12 @@ static void i40evf_init_task(struct work_struct *work)
                if (err) {
                        if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
                                err = i40evf_send_api_ver(adapter);
+                       else
+                               dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
+                                       adapter->pf_version.major,
+                                       adapter->pf_version.minor,
+                                       I40E_VIRTCHNL_VERSION_MAJOR,
+                                       I40E_VIRTCHNL_VERSION_MINOR);
                        goto err;
                }
                err = i40evf_send_vf_config_msg(adapter);
@@ -2085,42 +2154,15 @@ static void i40evf_init_task(struct work_struct *work)
        default:
                goto err_alloc;
        }
-       /* got VF config message back from PF, now we can parse it */
-       for (i = 0; i < adapter->vf_res->num_vsis; i++) {
-               if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
-                       adapter->vsi_res = &adapter->vf_res->vsi_res[i];
-       }
-       if (!adapter->vsi_res) {
-               dev_err(&pdev->dev, "No LAN VSI found\n");
+       if (i40evf_process_config(adapter))
                goto err_alloc;
-       }
+       adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
 
        adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
 
        netdev->netdev_ops = &i40evf_netdev_ops;
        i40evf_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;
-       netdev->features |= NETIF_F_HIGHDMA |
-                           NETIF_F_SG |
-                           NETIF_F_IP_CSUM |
-                           NETIF_F_SCTP_CSUM |
-                           NETIF_F_IPV6_CSUM |
-                           NETIF_F_TSO |
-                           NETIF_F_TSO6 |
-                           NETIF_F_RXCSUM |
-                           NETIF_F_GRO;
-
-       if (adapter->vf_res->vf_offload_flags
-           & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
-               netdev->vlan_features = netdev->features;
-               netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
-                                   NETIF_F_HW_VLAN_CTAG_RX |
-                                   NETIF_F_HW_VLAN_CTAG_FILTER;
-       }
-
-       /* copy netdev features into list of user selectable features */
-       netdev->hw_features |= netdev->features;
-       netdev->hw_features &= ~NETIF_F_RXCSUM;
 
        if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
                dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
@@ -2130,16 +2172,6 @@ static void i40evf_init_task(struct work_struct *work)
        ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
        ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
 
-       f = kzalloc(sizeof(*f), GFP_ATOMIC);
-       if (!f)
-               goto err_sw_init;
-
-       ether_addr_copy(f->macaddr, adapter->hw.mac.addr);
-       f->add = true;
-       adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
-
-       list_add(&f->list, &adapter->mac_filter_list);
-
        init_timer(&adapter->watchdog_timer);
        adapter->watchdog_timer.function = &i40evf_watchdog_timer;
        adapter->watchdog_timer.data = (unsigned long)adapter;
@@ -2161,17 +2193,6 @@ static void i40evf_init_task(struct work_struct *work)
 
        netif_carrier_off(netdev);
 
-       adapter->vsi.id = adapter->vsi_res->vsi_id;
-       adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
-       adapter->vsi.back = adapter;
-       adapter->vsi.base_vector = 1;
-       adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
-       adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
-                                      ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
-       adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
-                                      ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
-       adapter->vsi.netdev = adapter->netdev;
-
        if (!adapter->netdev_registered) {
                err = register_netdev(netdev);
                if (err)
@@ -2299,7 +2320,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        hw = &adapter->hw;
        hw->back = adapter;
 
-       adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+       adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
        adapter->state = __I40EVF_STARTUP;
 
        /* Call save state here because it relies on the adapter struct. */
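The reset path above now queues I40EVF_FLAG_AQ_GET_CONFIG instead of calling
i40evf_map_queues() directly, letting the watchdog replay the configuration.
A toy model of that one-request-per-pass dispatch; flag values mirror the
I40EVF_FLAG_AQ_* defines, and the comments name the routines the real driver
would invoke:

#include <stdint.h>

#define AQ_GET_CONFIG	(1u << 10)
#define AQ_MAP_VECTORS	(1u << 7)
#define AQ_ADD_MAC	(1u << 2)

/* Each pass services one pending admin-queue request; the real driver
 * clears the flag inside the send routine once the request is queued. */
static void watchdog_pass(uint32_t *aq_required)
{
	if (*aq_required & AQ_GET_CONFIG) {
		*aq_required &= ~AQ_GET_CONFIG;	 /* i40evf_send_vf_config_msg() */
		return;
	}
	if (*aq_required & AQ_MAP_VECTORS) {
		*aq_required &= ~AQ_MAP_VECTORS; /* i40evf_map_queues() */
		return;
	}
	if (*aq_required & AQ_ADD_MAC)
		*aq_required &= ~AQ_ADD_MAC;	 /* add MAC filters */
}

int main(void)
{
	uint32_t aq = AQ_GET_CONFIG | AQ_MAP_VECTORS;

	while (aq)
		watchdog_pass(&aq);	/* GET_CONFIG drains first */
	return 0;
}
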
index 61e090558f31334588ca06a4860099c5428becc9..d4eb1a5e7d42c4562a659202685d0e8331d384c9 100644 (file)
@@ -51,8 +51,9 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
 
        err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
        if (err)
-               dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
-                       op, err, hw->aq.asq_last_status);
+               dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
+                       op, i40evf_stat_str(hw, err),
+                       i40evf_aq_str(hw, hw->aq.asq_last_status));
        return err;
 }
 
@@ -125,8 +126,11 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
        }
 
        pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
-       if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
-           (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
+       adapter->pf_version = *pf_vvi;
+
+       if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) ||
+           ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) &&
+            (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR)))
                err = -EIO;
 
 out_alloc:
@@ -145,8 +149,24 @@ out:
  **/
 int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
 {
-       return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
-                                 NULL, 0);
+       u32 caps;
+
+       adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
+       adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
+       caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+              I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
+              I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+              I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+       if (PF_IS_V11(adapter))
+               return i40evf_send_pf_msg(adapter,
+                                         I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+                                         (u8 *)&caps, sizeof(caps));
+       else
+               return i40evf_send_pf_msg(adapter,
+                                         I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+                                         NULL, 0);
 }
 
 /**
@@ -274,7 +294,7 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter)
        }
        adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
        vqs.vsi_id = adapter->vsi_res->vsi_id;
-       vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
+       vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
        vqs.rx_queues = vqs.tx_queues;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
@@ -299,7 +319,7 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
        }
        adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
        vqs.vsi_id = adapter->vsi_res->vsi_id;
-       vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
+       vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
        vqs.rx_queues = vqs.tx_queues;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
@@ -708,8 +728,9 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                return;
        }
        if (v_retval) {
-               dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
-                       __func__, v_retval, v_opcode);
+               dev_err(&adapter->pdev->dev, "%s: PF returned error %d (%s) to our request %d\n",
+                       __func__, v_retval,
+                       i40evf_stat_str(&adapter->hw, v_retval), v_opcode);
        }
        switch (v_opcode) {
        case I40E_VIRTCHNL_OP_GET_STATS: {
@@ -729,6 +750,15 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                adapter->current_stats = *stats;
                }
                break;
+       case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: {
+               u16 len = sizeof(struct i40e_virtchnl_vf_resource) +
+                         I40E_MAX_VF_VSI *
+                         sizeof(struct i40e_virtchnl_vsi_resource);
+               memcpy(adapter->vf_res, msg, min(msglen, len));
+               i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
+               i40evf_process_config(adapter);
+               }
+               break;
        case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
                /* enable transmits */
                i40evf_irq_enable(adapter, true);
@@ -740,7 +770,6 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                i40evf_free_all_rx_resources(adapter);
                break;
        case I40E_VIRTCHNL_OP_VERSION:
-       case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
        case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
                /* Don't display an error if we get these out of sequence.
                 * If the firmware needed to get kicked, we'll get these and
index b0182dd313464ccceb85dd19c9489fcd7b3cd9c6..d19256994e5cfefce6793dbe58b953d9c79a0504 100644 (file)
@@ -139,10 +139,6 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
        if (ret_val)
                return ret_val;
 
-       /* reset page to 0 */
-       ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
-       if (ret_val)
-               return ret_val;
 
        if (data & E1000_M88E1112_STATUS_LINK)
                port = E1000_MEDIA_PORT_OTHER;
@@ -151,8 +147,20 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
        if (port && (hw->dev_spec._82575.media_port != port)) {
                hw->dev_spec._82575.media_port = port;
                hw->dev_spec._82575.media_changed = true;
+       }
+
+       if (port == E1000_MEDIA_PORT_COPPER) {
+               /* reset page to 0 */
+               ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+               if (ret_val)
+                       return ret_val;
+               igb_check_for_link_82575(hw);
        } else {
-               ret_val = igb_check_for_link_82575(hw);
+               igb_check_for_link_82575(hw);
+               /* reset page to 0 */
+               ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+               if (ret_val)
+                       return ret_val;
        }
 
        return 0;
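The igb_check_for_link_media_swap() rework above restores PHY page 0 before
the link check on the copper port and after it otherwise. A sketch of the
reordered flow, with stub helpers standing in for phy->ops.write_reg() on
E1000_M88E1112_PAGE_ADDR and for igb_check_for_link_82575():

static int phy_write_page(int page) { (void)page; return 0; }	/* stub */
static void check_for_link(void) { }				/* stub */

static int check_link_media_swap(int port_is_copper)
{
	int ret;

	if (port_is_copper) {
		ret = phy_write_page(0);	/* reset page first */
		if (ret)
			return ret;
		check_for_link();
	} else {
		check_for_link();
		ret = phy_write_page(0);	/* then reset page */
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	return check_link_media_swap(1);
}
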
index c1bb64d8366fa5e7741905ea9fc22d4b241054ce..987c9de247645a2d0ec1992d65703bf22a0daeb2 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
+ * Copyright(c) 2007-2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -36,9 +36,6 @@ static s32  igb_set_master_slave_mode(struct e1000_hw *hw);
 /* Cable length tables */
 static const u16 e1000_m88_cable_length_table[] = {
        0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
-#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
-       (sizeof(e1000_m88_cable_length_table) / \
-       sizeof(e1000_m88_cable_length_table[0]))
 
 static const u16 e1000_igp_2_cable_length_table[] = {
        0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
@@ -49,9 +46,6 @@ static const u16 e1000_igp_2_cable_length_table[] = {
        60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
        83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
        104, 109, 114, 118, 121, 124};
-#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
-       (sizeof(e1000_igp_2_cable_length_table) / \
-        sizeof(e1000_igp_2_cable_length_table[0]))
 
 /**
  *  igb_check_reset_block - Check if PHY reset is blocked
@@ -1700,7 +1694,7 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw)
 
        index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
                M88E1000_PSSR_CABLE_LENGTH_SHIFT;
-       if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+       if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
                ret_val = -E1000_ERR_PHY;
                goto out;
        }
@@ -1796,7 +1790,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
 
                index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
                        M88E1000_PSSR_CABLE_LENGTH_SHIFT;
-               if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+               if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
                        ret_val = -E1000_ERR_PHY;
                        goto out;
                }
@@ -1840,7 +1834,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
        s32 ret_val = 0;
        u16 phy_data, i, agc_value = 0;
        u16 cur_agc_index, max_agc_index = 0;
-       u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+       u16 min_agc_index = ARRAY_SIZE(e1000_igp_2_cable_length_table) - 1;
        static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
                IGP02E1000_PHY_AGC_A,
                IGP02E1000_PHY_AGC_B,
@@ -1863,7 +1857,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
                                IGP02E1000_AGC_LENGTH_MASK;
 
                /* Array index bound check. */
-               if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+               if ((cur_agc_index >= ARRAY_SIZE(e1000_igp_2_cable_length_table)) ||
                    (cur_agc_index == 0)) {
                        ret_val = -E1000_ERR_PHY;
                        goto out;
index d5673eb90c542c75f3ae4903997afc736a7d7956..b7b9c670bb3c7e24db5ff9d8e3437af06c27d317 100644 (file)
@@ -2159,6 +2159,27 @@ static int igb_set_coalesce(struct net_device *netdev,
        struct igb_adapter *adapter = netdev_priv(netdev);
        int i;
 
+       if (ec->rx_max_coalesced_frames ||
+           ec->rx_coalesce_usecs_irq ||
+           ec->rx_max_coalesced_frames_irq ||
+           ec->tx_max_coalesced_frames ||
+           ec->tx_coalesce_usecs_irq ||
+           ec->stats_block_coalesce_usecs ||
+           ec->use_adaptive_rx_coalesce ||
+           ec->use_adaptive_tx_coalesce ||
+           ec->pkt_rate_low ||
+           ec->rx_coalesce_usecs_low ||
+           ec->rx_max_coalesced_frames_low ||
+           ec->tx_coalesce_usecs_low ||
+           ec->tx_max_coalesced_frames_low ||
+           ec->pkt_rate_high ||
+           ec->rx_coalesce_usecs_high ||
+           ec->rx_max_coalesced_frames_high ||
+           ec->tx_coalesce_usecs_high ||
+           ec->tx_max_coalesced_frames_high ||
+           ec->rate_sample_interval)
+               return -ENOTSUPP;
+
        if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
            ((ec->rx_coalesce_usecs > 3) &&
             (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
@@ -2396,10 +2417,6 @@ static int igb_get_ts_info(struct net_device *dev,
                        info->rx_filters |=
                                (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
                                (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-                               (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-                               (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-                               (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-                               (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
                                (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
 
                return 0;
index 2f70a9b152bd1789349d9c4d995852e95be1e70d..41e27404689648a4bad220e174db32cdf1077580 100644 (file)
@@ -57,8 +57,8 @@
 #include "igb.h"
 
 #define MAJ 5
-#define MIN 2
-#define BUILD 18
+#define MIN 3
+#define BUILD 0
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 __stringify(BUILD) "-k"
 char igb_driver_name[] = "igb";
@@ -6621,22 +6621,25 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
                            struct sk_buff *skb)
 {
        struct page *page = rx_buffer->page;
+       unsigned char *va = page_address(page) + rx_buffer->page_offset;
        unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = IGB_RX_BUFSZ;
 #else
-       unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+       unsigned int truesize = SKB_DATA_ALIGN(size);
 #endif
+       unsigned int pull_len;
 
-       if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
-               unsigned char *va = page_address(page) + rx_buffer->page_offset;
+       if (unlikely(skb_is_nonlinear(skb)))
+               goto add_tail_frag;
 
-               if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-                       igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
-                       va += IGB_TS_HDR_LEN;
-                       size -= IGB_TS_HDR_LEN;
-               }
+       if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
+               igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+               va += IGB_TS_HDR_LEN;
+               size -= IGB_TS_HDR_LEN;
+       }
 
+       if (likely(size <= IGB_RX_HDR_LEN)) {
                memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
                /* page is not reserved, we can reuse buffer as-is */
@@ -6648,8 +6651,21 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
                return false;
        }
 
+       /* we need the header to contain the greater of either ETH_HLEN or
+        * 60 bytes if the skb->len is less than 60 for skb_pad.
+        */
+       pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
+
+       /* align pull length to size of long to optimize memcpy performance */
+       memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+       /* update all of the pointers */
+       va += pull_len;
+       size -= pull_len;
+
+add_tail_frag:
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-                       rx_buffer->page_offset, size, truesize);
+                       (unsigned long)va & ~PAGE_MASK, size, truesize);
 
        return igb_can_reuse_rx_page(rx_buffer, page, truesize);
 }
@@ -6790,62 +6806,6 @@ static bool igb_is_non_eop(struct igb_ring *rx_ring,
        return true;
 }
 
-/**
- *  igb_pull_tail - igb specific version of skb_pull_tail
- *  @rx_ring: rx descriptor ring packet is being transacted on
- *  @rx_desc: pointer to the EOP Rx descriptor
- *  @skb: pointer to current skb being adjusted
- *
- *  This function is an igb specific version of __pskb_pull_tail.  The
- *  main difference between this version and the original function is that
- *  this function can make several assumptions about the state of things
- *  that allow for significant optimizations versus the standard function.
- *  As a result we can do things like drop a frag and maintain an accurate
- *  truesize for the skb.
- */
-static void igb_pull_tail(struct igb_ring *rx_ring,
-                         union e1000_adv_rx_desc *rx_desc,
-                         struct sk_buff *skb)
-{
-       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-       unsigned char *va;
-       unsigned int pull_len;
-
-       /* it is valid to use page_address instead of kmap since we are
-        * working with pages allocated out of the lomem pool per
-        * alloc_page(GFP_ATOMIC)
-        */
-       va = skb_frag_address(frag);
-
-       if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-               /* retrieve timestamp from buffer */
-               igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
-
-               /* update pointers to remove timestamp header */
-               skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
-               frag->page_offset += IGB_TS_HDR_LEN;
-               skb->data_len -= IGB_TS_HDR_LEN;
-               skb->len -= IGB_TS_HDR_LEN;
-
-               /* move va to start of packet data */
-               va += IGB_TS_HDR_LEN;
-       }
-
-       /* we need the header to contain the greater of either ETH_HLEN or
-        * 60 bytes if the skb->len is less than 60 for skb_pad.
-        */
-       pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
-
-       /* align pull length to size of long to optimize memcpy performance */
-       skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
-       /* update all of the pointers */
-       skb_frag_size_sub(frag, pull_len);
-       frag->page_offset += pull_len;
-       skb->data_len -= pull_len;
-       skb->tail += pull_len;
-}
-
 /**
  *  igb_cleanup_headers - Correct corrupted or empty headers
  *  @rx_ring: rx descriptor ring packet is being transacted on
@@ -6873,10 +6833,6 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
                }
        }
 
-       /* place header in linear portion of buffer */
-       if (skb_is_nonlinear(skb))
-               igb_pull_tail(rx_ring, rx_desc, skb);
-
        /* if eth_skb_pad returns an error the skb was freed */
        if (eth_skb_pad(skb))
                return true;
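igb_add_rx_frag() above now copies only the packet headers into the skb's
linear area and leaves the payload in the page fragment, absorbing the old
igb_pull_tail(). A simplified model of that split; the header-length probe
that eth_get_headlen() performs is passed in as a parameter, and the skb
structure is a toy stand-in:

#include <stdint.h>
#include <string.h>

#define RX_HDR_LEN 256	/* plays the role of IGB_RX_HDR_LEN as the copy cap */

struct rx_skb {
	uint8_t linear[RX_HDR_LEN];
	size_t linear_len;
	const uint8_t *frag;	/* would point into the DMA page */
	size_t frag_len;
};

static void add_rx_frag(struct rx_skb *skb, const uint8_t *va, size_t size,
			size_t headlen /* what eth_get_headlen() returns */)
{
	if (size <= RX_HDR_LEN) {	/* small frame: copy it whole */
		memcpy(skb->linear, va, size);
		skb->linear_len = size;
		skb->frag_len = 0;
		return;
	}
	memcpy(skb->linear, va, headlen);	/* headers into linear area */
	skb->linear_len = headlen;
	skb->frag = va + headlen;		/* payload stays in the page */
	skb->frag_len = size - headlen;
}

int main(void)
{
	uint8_t buf[1500] = { 0 };
	struct rx_skb skb = { { 0 }, 0, 0, 0 };

	add_rx_frag(&skb, buf, sizeof(buf), 54 /* Ethernet+IP+TCP */);
	return skb.linear_len == 54 ? 0 : 1;
}
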
index 6b87d963461462cc92885aa057b19adbb18125c0..b1e364d26aa73f7e2bbbab3a46f5eaaa01e2fd38 100644 (file)
@@ -1394,14 +1394,12 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
        /*
         * Continue setup of fdirctrl register bits:
         *  Turn perfect match filtering on
-        *  Report hash in RSS field of Rx wb descriptor
         *  Initialize the drop queue
         *  Move the flexible bytes to use the ethertype - shift 6 words
         *  Set the maximum length per hash bucket to 0xA filters
         *  Send interrupt when 64 (0x4 * 16) filters are left
         */
        fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
-                   IXGBE_FDIRCTRL_REPORT_STATUS |
                    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
                    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
                    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
index ec7b2324b77b42489d9892de6b785ab7ca9936cf..f7aeb560a504af040603b2c3ebfa939f5d85b285 100644 (file)
@@ -2938,14 +2938,6 @@ static int ixgbe_get_ts_info(struct net_device *dev,
                        (1 << HWTSTAMP_FILTER_NONE) |
                        (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
                        (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-                       (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
-                       (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
-                       (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-                       (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-                       (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-                       (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
-                       (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-                       (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
                        (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
                break;
        default:
index 9aa6104e34ea8e2bd93994e4d8291763014162b8..3e6a9319c7185b52a4571cbbab61aa9dd54c422c 100644 (file)
@@ -1360,14 +1360,31 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
 }
 
 #endif /* CONFIG_IXGBE_DCA */
+
+#define IXGBE_RSS_L4_TYPES_MASK \
+       ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
+        (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
+        (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
+        (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
+
 static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
                                 union ixgbe_adv_rx_desc *rx_desc,
                                 struct sk_buff *skb)
 {
-       if (ring->netdev->features & NETIF_F_RXHASH)
-               skb_set_hash(skb,
-                            le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
-                            PKT_HASH_TYPE_L3);
+       u16 rss_type;
+
+       if (!(ring->netdev->features & NETIF_F_RXHASH))
+               return;
+
+       rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
+                  IXGBE_RXDADV_RSSTYPE_MASK;
+
+       if (!rss_type)
+               return;
+
+       skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+                    (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
+                    PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
 }
 
 #ifdef IXGBE_FCOE
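
The ixgbe hunk above upgrades the reported hash type: instead of always tagging the RSS hash as PKT_HASH_TYPE_L3, the driver now reads the RSS-type field from the Rx descriptor and reports PKT_HASH_TYPE_L4 whenever the hash also covered the TCP/UDP ports, which lets the stack reuse the hash for flow steering. A minimal user-space sketch of the same classification, reusing the RSS-type values added to ixgbe_type.h below (the main() harness is illustrative only):

#include <stdio.h>

/* RSS-type values from the advanced Rx descriptor (ixgbe_type.h) */
#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP  0x1
#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP  0x3
#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP  0x7
#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP  0x8

/* RSS types whose hash also covers the L4 ports */
#define IXGBE_RSS_L4_TYPES_MASK \
        ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
         (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
         (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
         (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))

static const char *hash_level(unsigned int rss_type)
{
        if (!rss_type)
                return "none";  /* descriptor carried no usable hash */
        return (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? "L4" : "L3";
}

int main(void)
{
        unsigned int t;

        for (t = 0; t <= 9; t++)
                printf("rss_type 0x%x -> %s\n", t, hash_level(t));
        return 0;
}
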
index 770e21a643880a9032cdc5b1e6460075acfd7250..58434584b16d9034f762ba85c71ab8251642e66e 100644 (file)
@@ -161,6 +161,18 @@ typedef u32 ixgbe_link_speed;
 #define IXGBE_RXDADV_SPLITHEADER_EN    0x00001000
 #define IXGBE_RXDADV_SPH               0x8000
 
+/* RSS Hash results */
+#define IXGBE_RXDADV_RSSTYPE_NONE              0x00000000
+#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP          0x00000001
+#define IXGBE_RXDADV_RSSTYPE_IPV4              0x00000002
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP          0x00000003
+#define IXGBE_RXDADV_RSSTYPE_IPV6_EX           0x00000004
+#define IXGBE_RXDADV_RSSTYPE_IPV6              0x00000005
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX       0x00000006
+#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP          0x00000007
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP          0x00000008
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX       0x00000009
+
 #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
                                      IXGBE_RXD_ERR_CE |  \
                                      IXGBE_RXD_ERR_LE |  \
index b2f5b161d792a769bdb780154bfe5895f56ba218..d3e5f5b37999a359e1ab1d1f4468a78e9afa5527 100644 (file)
@@ -813,22 +813,15 @@ static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 
-       /* We support this operation only for 82599 and x540 at the moment */
-       if (adapter->hw.mac.type < ixgbe_mac_X550_vf)
-               return IXGBEVF_82599_RETA_SIZE;
+       if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
+               return IXGBEVF_X550_VFRETA_SIZE;
 
-       return 0;
+       return IXGBEVF_82599_RETA_SIZE;
 }
 
 static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
 {
-       struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-
-       /* We support this operation only for 82599 and x540 at the moment */
-       if (adapter->hw.mac.type < ixgbe_mac_X550_vf)
-               return IXGBEVF_RSS_HASH_KEY_SIZE;
-
-       return 0;
+       return IXGBEVF_RSS_HASH_KEY_SIZE;
 }
 
 static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
@@ -840,21 +833,33 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
        if (hfunc)
                *hfunc = ETH_RSS_HASH_TOP;
 
-       /* If neither indirection table nor hash key was requested - just
-        * return a success avoiding taking any locks.
-        */
-       if (!indir && !key)
-               return 0;
+       if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
+               if (key)
+                       memcpy(key, adapter->rss_key, sizeof(adapter->rss_key));
 
-       spin_lock_bh(&adapter->mbx_lock);
-       if (indir)
-               err = ixgbevf_get_reta_locked(&adapter->hw, indir,
-                                             adapter->num_rx_queues);
+               if (indir) {
+                       int i;
 
-       if (!err && key)
-               err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
+                       for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
+                               indir[i] = adapter->rss_indir_tbl[i];
+               }
+       } else {
+               /* If neither indirection table nor hash key was requested
+                * - just return success without taking any locks.
+                */
+               if (!indir && !key)
+                       return 0;
 
-       spin_unlock_bh(&adapter->mbx_lock);
+               spin_lock_bh(&adapter->mbx_lock);
+               if (indir)
+                       err = ixgbevf_get_reta_locked(&adapter->hw, indir,
+                                                     adapter->num_rx_queues);
+
+               if (!err && key)
+                       err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
+
+               spin_unlock_bh(&adapter->mbx_lock);
+       }
 
        return err;
 }
index 775d089009499ddfcee4d97c3a55af73528138c5..04c7ec8446e0329c71eaee8eea346a4e92836b8e 100644 (file)
@@ -144,9 +144,11 @@ struct ixgbevf_ring {
 
 #define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
 #define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
-#define IXGBEVF_MAX_RSS_QUEUES 2
-#define IXGBEVF_82599_RETA_SIZE        128
+#define IXGBEVF_MAX_RSS_QUEUES         2
+#define IXGBEVF_82599_RETA_SIZE                128     /* 128 entries */
+#define IXGBEVF_X550_VFRETA_SIZE       64      /* 64 entries */
 #define IXGBEVF_RSS_HASH_KEY_SIZE      40
+#define IXGBEVF_VFRSSRK_REGS           10      /* 10 registers for RSS key */
 
 #define IXGBEVF_DEFAULT_TXD    1024
 #define IXGBEVF_DEFAULT_RXD    512
@@ -447,6 +449,9 @@ struct ixgbevf_adapter {
 
        spinlock_t mbx_lock;
        unsigned long last_reset;
+
+       u32 rss_key[IXGBEVF_VFRSSRK_REGS];
+       u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
 };
 
 enum ixbgevf_state_t {
index e71cdde9cb017aecab834d2f2d9c5d4821c3d42e..88298a3ef942e8200c2edaf7cce17ca40f18978d 100644 (file)
@@ -457,6 +457,32 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
        napi_gro_receive(&q_vector->napi, skb);
 }
 
+#define IXGBE_RSS_L4_TYPES_MASK \
+       ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
+        (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
+        (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
+        (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
+
+static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
+                                  union ixgbe_adv_rx_desc *rx_desc,
+                                  struct sk_buff *skb)
+{
+       u16 rss_type;
+
+       if (!(ring->netdev->features & NETIF_F_RXHASH))
+               return;
+
+       rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
+                  IXGBE_RXDADV_RSSTYPE_MASK;
+
+       if (!rss_type)
+               return;
+
+       skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+                    (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
+                    PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
+}
+
 /**
  * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
  * @ring: structure containing ring specific data
@@ -506,6 +532,7 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
                                       union ixgbe_adv_rx_desc *rx_desc,
                                       struct sk_buff *skb)
 {
+       ixgbevf_rx_hash(rx_ring, rx_desc, skb);
        ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
 
        if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -648,46 +675,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
        }
 }
 
-/**
- * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @skb: pointer to current skb being adjusted
- *
- * This function is an ixgbevf specific version of __pskb_pull_tail.  The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- **/
-static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
-                             struct sk_buff *skb)
-{
-       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-       unsigned char *va;
-       unsigned int pull_len;
-
-       /* it is valid to use page_address instead of kmap since we are
-        * working with pages allocated out of the lomem pool per
-        * alloc_page(GFP_ATOMIC)
-        */
-       va = skb_frag_address(frag);
-
-       /* we need the header to contain the greater of either ETH_HLEN or
-        * 60 bytes if the skb->len is less than 60 for skb_pad.
-        */
-       pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
-
-       /* align pull length to size of long to optimize memcpy performance */
-       skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
-       /* update all of the pointers */
-       skb_frag_size_sub(frag, pull_len);
-       frag->page_offset += pull_len;
-       skb->data_len -= pull_len;
-       skb->tail += pull_len;
-}
-
 /**
  * ixgbevf_cleanup_headers - Correct corrupted or empty headers
  * @rx_ring: rx descriptor ring packet is being transacted on
@@ -721,10 +708,6 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
                }
        }
 
-       /* place header in linear portion of buffer */
-       if (skb_is_nonlinear(skb))
-               ixgbevf_pull_tail(rx_ring, skb);
-
        /* if eth_skb_pad returns an error the skb was freed */
        if (eth_skb_pad(skb))
                return true;
@@ -789,16 +772,19 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
                                struct sk_buff *skb)
 {
        struct page *page = rx_buffer->page;
+       unsigned char *va = page_address(page) + rx_buffer->page_offset;
        unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = IXGBEVF_RX_BUFSZ;
 #else
        unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
 #endif
+       unsigned int pull_len;
 
-       if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
-               unsigned char *va = page_address(page) + rx_buffer->page_offset;
+       if (unlikely(skb_is_nonlinear(skb)))
+               goto add_tail_frag;
 
+       if (likely(size <= IXGBEVF_RX_HDR_SIZE)) {
                memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
                /* page is not reserved, we can reuse buffer as is */
@@ -810,8 +796,21 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
                return false;
        }
 
+       /* we need the header to contain the greater of either ETH_HLEN or
+        * 60 bytes if the skb->len is less than 60 for skb_pad.
+        */
+       pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
+
+       /* align pull length to size of long to optimize memcpy performance */
+       memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+       /* update all of the pointers */
+       va += pull_len;
+       size -= pull_len;
+
+add_tail_frag:
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-                       rx_buffer->page_offset, size, truesize);
+                       (unsigned long)va & ~PAGE_MASK, size, truesize);
 
        /* avoid re-using remote pages */
        if (unlikely(ixgbevf_page_is_reserved(page)))
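
This restructuring of ixgbevf_add_rx_frag() folds the removed ixgbevf_pull_tail() pass into the frag-add path: small frames are still copied wholesale into the linear area, while for larger first buffers only the parsed headers are pulled in and the remainder is attached as a page fragment at the advanced va offset. A user-space sketch of the long-aligned header copy; fake_headlen() stands in for eth_get_headlen():

#include <stdio.h>
#include <string.h>

#define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

/* Stand-in for eth_get_headlen(): pretend the parsed headers span
 * 54 bytes (Ethernet + IPv4 + TCP); the driver computes this per packet. */
static unsigned int fake_headlen(void) { return 54; }

int main(void)
{
        unsigned char page_buf[256], linear[128];
        unsigned int pull_len = fake_headlen();
        unsigned int copy_len = ALIGN(pull_len, sizeof(long));

        memset(page_buf, 0xab, sizeof(page_buf));

        /* Copy a long-aligned amount for memcpy throughput, but account
         * only pull_len bytes as packet data; the tail frag then starts
         * at va + pull_len with size - pull_len bytes. */
        memcpy(linear, page_buf, copy_len);

        printf("pull_len=%u copied=%u frag starts at offset %u\n",
               pull_len, copy_len, pull_len);
        return 0;
}
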
@@ -1697,22 +1696,25 @@ static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        u32 vfmrqc = 0, vfreta = 0;
-       u32 rss_key[10];
        u16 rss_i = adapter->num_rx_queues;
-       int i, j;
+       u8 i, j;
 
        /* Fill out hash function seeds */
-       netdev_rss_key_fill(rss_key, sizeof(rss_key));
-       for (i = 0; i < 10; i++)
-               IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
+       netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
+       for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
+               IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]);
 
-       /* Fill out redirection table */
-       for (i = 0, j = 0; i < 64; i++, j++) {
+       for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
                if (j == rss_i)
                        j = 0;
-               vfreta = (vfreta << 8) | (j * 0x1);
-               if ((i & 3) == 3)
+
+               adapter->rss_indir_tbl[i] = j;
+
+               vfreta |= j << (i & 0x3) * 8;
+               if ((i & 3) == 3) {
                        IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
+                       vfreta = 0;
+               }
        }
 
        /* Perform hash on these packet types */
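
The rewritten ixgbevf_setup_vfmrqc() loop now caches every redirection entry in adapter->rss_indir_tbl (so ethtool can report it without a PF mailbox round-trip) and packs four 8-bit queue indices per 32-bit VFRETA register, placing entry (i & 3) in bits (i & 3) * 8 and clearing the accumulator after each write. A user-space rendering of the packing with the driver's sizes (64 entries, 2 RSS queues):

#include <stdio.h>
#include <stdint.h>

#define VFRETA_SIZE  64   /* X550 VF redirection-table entries */

int main(void)
{
        uint8_t indir_tbl[VFRETA_SIZE];
        uint32_t vfreta = 0;
        unsigned int rss_i = 2;   /* Rx queues to spread across */
        unsigned int i, j;

        for (i = 0, j = 0; i < VFRETA_SIZE; i++, j++) {
                if (j == rss_i)
                        j = 0;

                indir_tbl[i] = j;   /* cached copy, served via ethtool */

                /* entry (i & 3) occupies bits (i & 3) * 8 .. +7 */
                vfreta |= (uint32_t)j << ((i & 0x3) * 8);
                if ((i & 3) == 3) {
                        printf("VFRETA[%2u] = 0x%08x\n", i >> 2, vfreta);
                        vfreta = 0;   /* must be cleared between registers */
                }
        }

        printf("indir_tbl[0..3] = %u %u %u %u\n",
               indir_tbl[0], indir_tbl[1], indir_tbl[2], indir_tbl[3]);
        return 0;
}
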
index 370e20ed224c5c76eaca92954be5800d09d81ada..fe2299ac4f5c0e43b1ff3cf124381df3a1daecf1 100644 (file)
@@ -1462,7 +1462,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
                     struct mvneta_rx_queue *rxq)
 {
        struct net_device *dev = pp->dev;
-       int rx_done, rx_filled;
+       int rx_done;
        u32 rcvd_pkts = 0;
        u32 rcvd_bytes = 0;
 
@@ -1473,7 +1473,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
                rx_todo = rx_done;
 
        rx_done = 0;
-       rx_filled = 0;
 
        /* Fairness NAPI loop */
        while (rx_done < rx_todo) {
@@ -1484,7 +1483,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
                int rx_bytes, err;
 
                rx_done++;
-               rx_filled++;
                rx_status = rx_desc->status;
                rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
                data = (unsigned char *)rx_desc->buf_cookie;
@@ -1524,6 +1522,14 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
                        continue;
                }
 
+               /* Refill processing */
+               err = mvneta_rx_refill(pp, rx_desc);
+               if (err) {
+                       netdev_err(dev, "Linux processing - Can't refill\n");
+                       rxq->missed++;
+                       goto err_drop_frame;
+               }
+
                skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
                if (!skb)
                        goto err_drop_frame;
@@ -1543,14 +1549,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
                mvneta_rx_csum(pp, rx_status, skb);
 
                napi_gro_receive(&pp->napi, skb);
-
-               /* Refill processing */
-               err = mvneta_rx_refill(pp, rx_desc);
-               if (err) {
-                       netdev_err(dev, "Linux processing - Can't refill\n");
-                       rxq->missed++;
-                       rx_filled--;
-               }
        }
 
        if (rcvd_pkts) {
@@ -1563,7 +1561,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
        }
 
        /* Update rxq management counters */
-       mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
+       mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
 
        return rx_done;
 }
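
The mvneta change moves the refill ahead of build_skb()/napi_gro_receive(): if the refill fails, the frame is dropped while its buffer is still owned by the ring, so every processed descriptor is also a refilled one and the separate rx_filled counter becomes redundant (hence the rx_done, rx_done pair passed to mvneta_rxq_desc_num_update()). A hedged stand-alone sketch of the ordering; the stub names are illustrative, not the driver's API:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stubs for mvneta_rx_refill(), build_skb() + GRO, and the
 * drop path -- not the driver's API. */
static bool refill_descriptor(int desc) { return desc != 3; /* fail once */ }
static void deliver_to_stack(int desc)  { printf("deliver %d\n", desc); }
static void drop_frame(int desc)        { printf("drop    %d\n", desc); }

int main(void)
{
        int rx_done = 0, rx_todo = 5, desc;

        for (desc = 0; desc < rx_todo; desc++) {
                rx_done++;

                /* Refill first: on failure the old buffer still belongs to
                 * the ring, so the frame is dropped and the buffer reused --
                 * every processed descriptor is therefore also filled. */
                if (!refill_descriptor(desc)) {
                        drop_frame(desc);
                        continue;
                }
                deliver_to_stack(desc);
        }

        printf("processed = refilled = %d\n", rx_done);
        return 0;
}
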
@@ -3029,8 +3027,8 @@ static int mvneta_probe(struct platform_device *pdev)
        const char *dt_mac_addr;
        char hw_mac_addr[ETH_ALEN];
        const char *mac_from;
+       const char *managed;
        int phy_mode;
-       int fixed_phy = 0;
        int err;
 
        /* Our multiqueue support is not complete, so for now, only
@@ -3064,7 +3062,6 @@ static int mvneta_probe(struct platform_device *pdev)
                        dev_err(&pdev->dev, "cannot register fixed PHY\n");
                        goto err_free_irq;
                }
-               fixed_phy = 1;
 
                /* In the case of a fixed PHY, the DT node associated
                 * to the PHY is the Ethernet MAC DT node.
@@ -3088,8 +3085,10 @@ static int mvneta_probe(struct platform_device *pdev)
        pp = netdev_priv(dev);
        pp->phy_node = phy_node;
        pp->phy_interface = phy_mode;
-       pp->use_inband_status = (phy_mode == PHY_INTERFACE_MODE_SGMII) &&
-                               fixed_phy;
+
+       err = of_property_read_string(dn, "managed", &managed);
+       pp->use_inband_status = (err == 0 &&
+                                strcmp(managed, "in-band-status") == 0);
 
        pp->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(pp->clk)) {
index 52a6665b7abf4f248b89f60e3408c1f020daaa85..d54701047401d73eeb54feca8cbc48c27ab5463e 100644 (file)
@@ -18,5 +18,6 @@ if NET_VENDOR_MELLANOX
 
 source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
 source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig"
+source "drivers/net/ethernet/mellanox/mlxsw/Kconfig"
 
 endif # NET_VENDOR_MELLANOX
index 38fe32ef5e5f438d713e840f6b83332f19e6a6a8..2e2a5ec509ac520bfddd3348e6fa08e373a37167 100644 (file)
@@ -4,3 +4,4 @@
 
 obj-$(CONFIG_MLX4_CORE) += mlx4/
 obj-$(CONFIG_MLX5_CORE) += mlx5/core/
+obj-$(CONFIG_MLXSW_CORE) += mlxsw/
index 82040137d7d9723a0ab027fe72bc25a3063368b3..0a3202047569c707a28f62376466e72fdcd8cd00 100644 (file)
@@ -686,6 +686,7 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
 {
        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
        struct mlx4_cmd_context *context;
+       long ret_wait;
        int err = 0;
 
        down(&cmd->event_sem);
@@ -711,8 +712,20 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
        if (err)
                goto out_reset;
 
-       if (!wait_for_completion_timeout(&context->done,
-                                        msecs_to_jiffies(timeout))) {
+       if (op == MLX4_CMD_SENSE_PORT) {
+               ret_wait =
+                       wait_for_completion_interruptible_timeout(&context->done,
+                                                                 msecs_to_jiffies(timeout));
+               if (ret_wait < 0) {
+                       context->fw_status = 0;
+                       context->out_param = 0;
+                       context->result = 0;
+               }
+       } else {
+               ret_wait = (long)wait_for_completion_timeout(&context->done,
+                                                            msecs_to_jiffies(timeout));
+       }
+       if (!ret_wait) {
                mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
                          op);
                if (op == MLX4_CMD_NOP) {
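
Only MLX4_CMD_SENSE_PORT becomes interruptible here. wait_for_completion_interruptible_timeout() returns a negative value when a signal arrives, 0 on timeout, and the remaining jiffies on completion, so the signal case is converted into a synthetic zeroed result instead of falling into the timeout path. A sketch of that three-way return handling (not compilable standalone; struct cmd_ctx and handle_timeout() are placeholders mimicking mlx4_cmd_context):

struct cmd_ctx { struct completion done; u8 fw_status; int result; };

static void wait_for_cmd(struct cmd_ctx *ctx, unsigned int timeout_ms)
{
        long ret = wait_for_completion_interruptible_timeout(
                        &ctx->done, msecs_to_jiffies(timeout_ms));

        if (ret < 0) {
                /* signal: stop waiting, report an "empty" result rather
                 * than a firmware timeout */
                ctx->fw_status = 0;
                ctx->result = 0;
        } else if (ret == 0) {
                handle_timeout(ctx);    /* go bit never cleared */
        }
        /* ret > 0: completed with 'ret' jiffies to spare */
}
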
index 99ba1c50e5851769fa58ddee4522c2e6062db0a8..f79d8124321e525b04de13e7ad1509105b789a2d 100644 (file)
@@ -102,6 +102,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
 
 static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
        "blueflame",
+       "phv-bit"
 };
 
 static const char main_strings[][ETH_GSTRING_LEN] = {
@@ -1797,35 +1798,49 @@ static int mlx4_en_get_ts_info(struct net_device *dev,
 static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
        bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
        bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
+       bool phv_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_PHV);
+       bool phv_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_PHV);
        int i;
+       int ret = 0;
 
-       if (bf_enabled_new == bf_enabled_old)
-               return 0; /* Nothing to do */
+       if (bf_enabled_new != bf_enabled_old) {
+               if (bf_enabled_new) {
+                       bool bf_supported = true;
 
-       if (bf_enabled_new) {
-               bool bf_supported = true;
+                       for (i = 0; i < priv->tx_ring_num; i++)
+                               bf_supported &= priv->tx_ring[i]->bf_alloced;
 
-               for (i = 0; i < priv->tx_ring_num; i++)
-                       bf_supported &= priv->tx_ring[i]->bf_alloced;
+                       if (!bf_supported) {
+                               en_err(priv, "BlueFlame is not supported\n");
+                               return -EINVAL;
+                       }
 
-               if (!bf_supported) {
-                       en_err(priv, "BlueFlame is not supported\n");
-                       return -EINVAL;
+                       priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
+               } else {
+                       priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
                }
 
-               priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
-       } else {
-               priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
-       }
-
-       for (i = 0; i < priv->tx_ring_num; i++)
-               priv->tx_ring[i]->bf_enabled = bf_enabled_new;
+               for (i = 0; i < priv->tx_ring_num; i++)
+                       priv->tx_ring[i]->bf_enabled = bf_enabled_new;
 
-       en_info(priv, "BlueFlame %s\n",
-               bf_enabled_new ?  "Enabled" : "Disabled");
+               en_info(priv, "BlueFlame %s\n",
+                       bf_enabled_new ?  "Enabled" : "Disabled");
+       }
 
+       if (phv_enabled_new != phv_enabled_old) {
+               ret = set_phv_bit(mdev->dev, priv->port, (int)phv_enabled_new);
+               if (ret)
+                       return ret;
+               else if (phv_enabled_new)
+                       priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
+               else
+                       priv->pflags &= ~MLX4_EN_PRIV_FLAGS_PHV;
+               en_info(priv, "PHV bit %s\n",
+                       phv_enabled_new ?  "Enabled" : "Disabled");
+       }
        return 0;
 }
 
index e0de2fd1ce124d3d668659b89544d172164037f4..4726122ea76b296f4b45258cb8ee9358c668ff60 100644 (file)
@@ -2184,6 +2184,25 @@ static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        }
 }
 
+static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
+                                             netdev_features_t features)
+{
+       struct mlx4_en_priv *en_priv = netdev_priv(netdev);
+       struct mlx4_en_dev *mdev = en_priv->mdev;
+
+       /* Since there is no support for separate RX C-TAG/S-TAG VLAN accel
+        * enable/disable, make sure the S-TAG flag is always in the same
+        * state as C-TAG.
+        */
+       if (features & NETIF_F_HW_VLAN_CTAG_RX &&
+           !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+               features |= NETIF_F_HW_VLAN_STAG_RX;
+       else
+               features &= ~NETIF_F_HW_VLAN_STAG_RX;
+
+       return features;
+}
+
 static int mlx4_en_set_features(struct net_device *netdev,
                netdev_features_t features)
 {
@@ -2218,6 +2237,10 @@ static int mlx4_en_set_features(struct net_device *netdev,
                en_info(priv, "Turn %s TX vlan strip offload\n",
                        (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
 
+       if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
+               en_info(priv, "Turn %s TX S-VLAN strip offload\n",
+                       (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
+
        if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
                en_info(priv, "Turn %s loopback\n",
                        (features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
@@ -2460,6 +2483,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
        .ndo_poll_controller    = mlx4_en_netpoll,
 #endif
        .ndo_set_features       = mlx4_en_set_features,
+       .ndo_fix_features       = mlx4_en_fix_features,
        .ndo_setup_tc           = mlx4_en_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
@@ -2500,6 +2524,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
        .ndo_poll_controller    = mlx4_en_netpoll,
 #endif
        .ndo_set_features       = mlx4_en_set_features,
+       .ndo_fix_features       = mlx4_en_fix_features,
        .ndo_setup_tc           = mlx4_en_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
@@ -2931,6 +2956,27 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        dev->hw_features |= NETIF_F_LOOPBACK |
                        NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 
+       if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+               dev->features |= NETIF_F_HW_VLAN_STAG_RX |
+                       NETIF_F_HW_VLAN_STAG_FILTER;
+               dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+       }
+
+       if (mlx4_is_slave(mdev->dev)) {
+               int phv;
+
+               err = get_phv_bit(mdev->dev, port, &phv);
+               if (!err && phv) {
+                       dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+                       priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
+               }
+       } else {
+               if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
+                   !(mdev->dev->caps.flags2 &
+                     MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+                       dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+       }
+
        if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
                dev->hw_features |= NETIF_F_RXFCS;
 
index 7a4f20bb7fcb4c2640ad8111f5a98ff95088075c..4402a1e48c9bb9d8153df9e9f9378d5e4ece38b3 100644 (file)
@@ -246,7 +246,6 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
 
 static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
 {
-       BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
        return ring->prod == ring->cons;
 }
 
@@ -726,7 +725,7 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
 
        hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
 
-       if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) &&
+       if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
            !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
                hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
                hdr += sizeof(struct vlan_hdr);
@@ -907,17 +906,25 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                                gro_skb->csum_level = 1;
 
                        if ((cqe->vlan_my_qpn &
-                           cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
+                           cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
                            (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
                                u16 vid = be16_to_cpu(cqe->sl_vid);
 
                                __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
+                       } else if ((be32_to_cpu(cqe->vlan_my_qpn) &
+                                 MLX4_CQE_SVLAN_PRESENT_MASK) &&
+                                (dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
+                               __vlan_hwaccel_put_tag(gro_skb,
+                                                      htons(ETH_P_8021AD),
+                                                      be16_to_cpu(cqe->sl_vid));
                        }
 
                        if (dev->features & NETIF_F_RXHASH)
                                skb_set_hash(gro_skb,
                                             be32_to_cpu(cqe->immed_rss_invalid),
-                                            PKT_HASH_TYPE_L3);
+                                            (ip_summed == CHECKSUM_UNNECESSARY) ?
+                                               PKT_HASH_TYPE_L4 :
+                                               PKT_HASH_TYPE_L3);
 
                        skb_record_rx_queue(gro_skb, cq->ring);
                        skb_mark_napi_id(gro_skb, &cq->napi);
@@ -963,12 +970,19 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                if (dev->features & NETIF_F_RXHASH)
                        skb_set_hash(skb,
                                     be32_to_cpu(cqe->immed_rss_invalid),
-                                    PKT_HASH_TYPE_L3);
+                                    (ip_summed == CHECKSUM_UNNECESSARY) ?
+                                       PKT_HASH_TYPE_L4 :
+                                       PKT_HASH_TYPE_L3);
 
                if ((be32_to_cpu(cqe->vlan_my_qpn) &
-                   MLX4_CQE_VLAN_PRESENT_MASK) &&
+                   MLX4_CQE_CVLAN_PRESENT_MASK) &&
                    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
+               else if ((be32_to_cpu(cqe->vlan_my_qpn) &
+                         MLX4_CQE_SVLAN_PRESENT_MASK) &&
+                        (dev->features & NETIF_F_HW_VLAN_STAG_RX))
+                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
+                                              be16_to_cpu(cqe->sl_vid));
 
                if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
                        timestamp = mlx4_en_get_cqe_ts(cqe);
@@ -1066,7 +1080,10 @@ static const int frag_sizes[] = {
 void mlx4_en_calc_rx_buf(struct net_device *dev)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
-       int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN;
+       /* VLAN_HLEN is added twice, to support skbs VLAN-tagged with multiple
+        * headers. (For example: ETH_P_8021Q and ETH_P_8021AD).
+        */
+       int eff_mtu = dev->mtu + ETH_HLEN + (2 * VLAN_HLEN);
        int buf_size = 0;
        int i = 0;
 
index c10d98f6ad967b13640b5d9b2fe033f377565ff0..494e7762fdb19efb83d76f187b88fd37d422d5b5 100644 (file)
@@ -718,6 +718,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        u32 index, bf_index;
        __be32 op_own;
        u16 vlan_tag = 0;
+       u16 vlan_proto = 0;
        int i_frag;
        int lso_header_size;
        void *fragptr = NULL;
@@ -750,9 +751,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                goto tx_drop;
        }
 
-       if (skb_vlan_tag_present(skb))
+       if (skb_vlan_tag_present(skb)) {
                vlan_tag = skb_vlan_tag_get(skb);
-
+               vlan_proto = be16_to_cpu(skb->vlan_proto);
+       }
 
        netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
 
@@ -958,8 +960,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                ring->bf.offset ^= ring->bf.buf_size;
        } else {
                tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
-               tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
-                       !!skb_vlan_tag_present(skb);
+               if (vlan_proto == ETH_P_8021AD)
+                       tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN;
+               else if (vlan_proto == ETH_P_8021Q)
+                       tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
+
                tx_desc->ctrl.fence_size = real_size;
 
                /* Ensure new descriptor hits memory
index aae13adfb492b885bcf2fba03b042b949c3f5575..8e81e53c370e7d54e6367c012212cccc73ee26fd 100644 (file)
@@ -601,7 +601,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                                        continue;
                                                mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
                                                         __func__, i, port);
-                                               s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
+                                               s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
                                                if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
                                                        eqe->event.port_change.port =
                                                                cpu_to_be32(
@@ -640,7 +640,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                                        continue;
                                                if (i == mlx4_master_func_num(dev))
                                                        continue;
-                                               s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
+                                               s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
                                                if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
                                                        eqe->event.port_change.port =
                                                                cpu_to_be32(
index e30bf57ad7a18ff559eb4bba122252eaf0308964..e8ec1dec5789a8d80499e8c478e4822567480284 100644 (file)
@@ -154,6 +154,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
                [26] = "Port ETS Scheduler support",
                [27] = "Port beacon support",
                [28] = "RX-ALL support",
+               [29] = "802.1ad offload support",
        };
        int i;
 
@@ -307,6 +308,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 
 #define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
 #define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31)
+#define QUERY_FUNC_CAP_PHV_BIT                 0x40
 
        if (vhcr->op_modifier == 1) {
                struct mlx4_active_ports actv_ports =
@@ -351,6 +353,12 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
                MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
                         QUERY_FUNC_CAP_PHYS_PORT_ID);
 
+               if (dev->caps.phv_bit[port]) {
+                       field = QUERY_FUNC_CAP_PHV_BIT;
+                       MLX4_PUT(outbox->buf, field,
+                                QUERY_FUNC_CAP_FLAGS0_OFFSET);
+               }
+
        } else if (vhcr->op_modifier == 0) {
                struct mlx4_active_ports actv_ports =
                        mlx4_get_active_ports(dev, slave);
@@ -600,6 +608,9 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
                MLX4_GET(func_cap->phys_port_id, outbox,
                         QUERY_FUNC_CAP_PHYS_PORT_ID);
 
+       MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
+       func_cap->flags |= (field & QUERY_FUNC_CAP_PHV_BIT);
+
        /* All other resources are allocated by the master, but we still report
         * 'num' and 'reserved' capabilities as follows:
         * - num remains the maximum resource index
@@ -700,6 +711,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET    0x92
 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET                0x94
 #define QUERY_DEV_CAP_CONFIG_DEV_OFFSET                0x94
+#define QUERY_DEV_CAP_PHV_EN_OFFSET            0x96
 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET         0x98
 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET                0xa0
 #define QUERY_DEV_CAP_ETH_BACKPL_OFFSET                0x9c
@@ -898,6 +910,12 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
        if (field & (1 << 2))
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET);
+       if (field & 0x80)
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN;
+       if (field & 0x40)
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN;
+
        MLX4_GET(dev_cap->reserved_lkey, outbox,
                 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
        MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
@@ -1992,6 +2010,10 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
        MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
        MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
 
+       /* phv_check enable */
+       MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
+       if (byte_field & 0x2)
+               param->phv_check_en = 1;
 out:
        mlx4_free_cmd_mailbox(dev, mailbox);
 
@@ -2758,3 +2780,63 @@ int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
                            0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
                            MLX4_CMD_NATIVE);
 }
+
+static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit)
+{
+#define SET_PORT_GEN_PHV_VALID 0x10
+#define SET_PORT_GEN_PHV_EN    0x80
+
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_set_port_general_context *context;
+       u32 in_mod;
+       int err;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       context = mailbox->buf;
+
+       context->v_ignore_fcs |=  SET_PORT_GEN_PHV_VALID;
+       if (phv_bit)
+               context->phv_en |=  SET_PORT_GEN_PHV_EN;
+
+       in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+                      MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+
+int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv)
+{
+       int err;
+       struct mlx4_func_cap func_cap;
+
+       memset(&func_cap, 0, sizeof(func_cap));
+       err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
+       if (!err)
+               *phv = func_cap.flags & QUERY_FUNC_CAP_PHV_BIT;
+       return err;
+}
+EXPORT_SYMBOL(get_phv_bit);
+
+int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
+{
+       int ret;
+
+       if (mlx4_is_slave(dev))
+               return -EPERM;
+
+       if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
+           !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+               ret = mlx4_SET_PORT_phv_bit(dev, port, new_val);
+               if (!ret)
+                       dev->caps.phv_bit[port] = new_val;
+               return ret;
+       }
+
+       return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(set_phv_bit);
index 07cb7c2461adaa90cbfab5e478a6a82d14613f87..08de5555c2f4d032bd61cf71689ef3cd0d8edd81 100644 (file)
@@ -204,6 +204,7 @@ struct mlx4_init_hca_param {
        u16 cqe_size; /* For use only when CQE stride feature enabled */
        u16 eqe_size; /* For use only when EQE stride feature enabled */
        u8 rss_ip_frags;
+       u8 phv_check_en; /* for QUERY_HCA */
 };
 
 struct mlx4_init_ib_param {
index 12fbfcb44d8acdedf08fef880a12e53145f8836e..121c579888bba15789c96584cab45bd2c738f709 100644 (file)
@@ -405,6 +405,21 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        dev->caps.max_gso_sz         = dev_cap->max_gso_sz;
        dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;
 
+       if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
+               struct mlx4_init_hca_param hca_param;
+
+               memset(&hca_param, 0, sizeof(hca_param));
+               err = mlx4_QUERY_HCA(dev, &hca_param);
+               /* Turn off PHV_EN flag in case phv_check_en is set.
+                * phv_check_en is a HW check that parses the packet and verifies
+                * that the phv bit was reported correctly in the WQE. To allow
+                * QinQ, the PHV_EN flag should be set and phv_check_en must be
+                * cleared; otherwise QinQ packets will be dropped by the HW.
+                */
+               if (err || hca_param.phv_check_en)
+                       dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
+       }
+
        /* Sense port always allowed on supported devices for ConnectX-1 and -2 */
        if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
                dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
@@ -2273,6 +2288,11 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
                } else if (err == -ENOENT) {
                        err = 0;
                        continue;
+               } else if (mlx4_is_slave(dev) && err == -EINVAL) {
+                       priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
+                       mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
+                                 MLX4_SINK_COUNTER_INDEX(dev));
+                       err = 0;
                } else {
                        mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
                                 __func__, port + 1, err);
@@ -2907,6 +2927,8 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
 {
        u64 dev_flags = dev->flags;
        int err = 0;
+       int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev),
+                                       MLX4_MAX_NUM_VF);
 
        if (reset_flow) {
                dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
@@ -2932,6 +2954,12 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
        }
 
        if (!(dev->flags &  MLX4_FLAG_SRIOV)) {
+               if (total_vfs > fw_enabled_sriov_vfs) {
+                       mlx4_err(dev, "requested vfs (%d) > available vfs (%d). Continuing without SR_IOV\n",
+                                total_vfs, fw_enabled_sriov_vfs);
+                       err = -ENOMEM;
+                       goto disable_sriov;
+               }
                mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
                err = pci_enable_sriov(pdev, total_vfs);
        }
@@ -3413,20 +3441,20 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
                        goto err_disable_pdev;
                }
        }
-       if (total_vfs >= MLX4_MAX_NUM_VF) {
+       if (total_vfs > MLX4_MAX_NUM_VF) {
                dev_err(&pdev->dev,
-                       "Requested more VF's (%d) than allowed (%d)\n",
-                       total_vfs, MLX4_MAX_NUM_VF - 1);
+                       "Requested more VF's (%d) than allowed by hw (%d)\n",
+                       total_vfs, MLX4_MAX_NUM_VF);
                err = -EINVAL;
                goto err_disable_pdev;
        }
 
        for (i = 0; i < MLX4_MAX_PORTS; i++) {
-               if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
+               if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) {
                        dev_err(&pdev->dev,
-                               "Requested more VF's (%d) for port (%d) than allowed (%d)\n",
+                               "Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n",
                                nvfs[i] + nvfs[2], i + 1,
-                               MLX4_MAX_NUM_VF_P_PORT - 1);
+                               MLX4_MAX_NUM_VF_P_PORT);
                        err = -EINVAL;
                        goto err_disable_pdev;
                }
index a092c5c34d4375df330c1f8fea2b01d2545580dd..232b2b55f23b9170b32f351926c200b6ac0e7f1c 100644 (file)
@@ -787,6 +787,9 @@ struct mlx4_set_port_general_context {
        u8 pprx;
        u8 pfcrx;
        u16 reserved4;
+       u32 reserved5;
+       u8 phv_en;
+       u8 reserved6[3];
 };
 
 struct mlx4_set_port_rqp_calc_context {
index 666d1669eb5233f9a8e6baf5773621159375af25..defcf8c395bface7f024043cc51484bd7a4f3820 100644 (file)
@@ -95,6 +95,7 @@
  */
 
 #define MLX4_EN_PRIV_FLAGS_BLUEFLAME 1
+#define MLX4_EN_PRIV_FLAGS_PHV      2
 
 #define MLX4_EN_WATCHDOG_TIMEOUT       (15 * HZ)
 
index 0715b497511f6c861f5ac027341960fdc0acfab5..6cb38304669f6e5618edfea860a8c8d5f49e5c54 100644 (file)
  * register it in a memory region at HCA virtual address 0.
  */
 
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
+                                          size_t size, dma_addr_t *dma_handle,
+                                          int node)
+{
+       struct mlx5_priv *priv = &dev->priv;
+       int original_node;
+       void *cpu_handle;
+
+       mutex_lock(&priv->alloc_mutex);
+       original_node = dev_to_node(&dev->pdev->dev);
+       set_dev_node(&dev->pdev->dev, node);
+       cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
+                                        dma_handle, GFP_KERNEL);
+       set_dev_node(&dev->pdev->dev, original_node);
+       mutex_unlock(&priv->alloc_mutex);
+       return cpu_handle;
+}
+
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+                       struct mlx5_buf *buf, int node)
 {
        dma_addr_t t;
 
        buf->size = size;
        buf->npages       = 1;
        buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;
-       buf->direct.buf   = dma_zalloc_coherent(&dev->pdev->dev,
-                                               size, &t, GFP_KERNEL);
+       buf->direct.buf   = mlx5_dma_zalloc_coherent_node(dev, size,
+                                                         &t, node);
        if (!buf->direct.buf)
                return -ENOMEM;
 
@@ -66,6 +85,11 @@ int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
 
        return 0;
 }
+
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+{
+       return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
+}
 EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
 
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
@@ -75,7 +99,8 @@ void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
 }
 EXPORT_SYMBOL_GPL(mlx5_buf_free);
 
-static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
+static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
+                                                int node)
 {
        struct mlx5_db_pgdir *pgdir;
 
@@ -84,8 +109,9 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
                return NULL;
 
        bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
-       pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
-                                           &pgdir->db_dma, GFP_KERNEL);
+
+       pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
+                                                      &pgdir->db_dma, node);
        if (!pgdir->db_page) {
                kfree(pgdir);
                return NULL;
@@ -118,7 +144,7 @@ static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
        return 0;
 }
 
-int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
 {
        struct mlx5_db_pgdir *pgdir;
        int ret = 0;
@@ -129,7 +155,7 @@ int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
                if (!mlx5_alloc_db_from_pgdir(pgdir, db))
                        goto out;
 
-       pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev));
+       pgdir = mlx5_alloc_db_pgdir(dev, node);
        if (!pgdir) {
                ret = -ENOMEM;
                goto out;
@@ -145,6 +171,12 @@ out:
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);
+
+int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+{
+       return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
+}
 EXPORT_SYMBOL_GPL(mlx5_db_alloc);
 
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
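
mlx5_dma_zalloc_coherent_node() above steers coherent DMA buffers toward the NUMA node that will touch them by temporarily overriding the PCI device's node before calling dma_zalloc_coherent(). Since the device node is global state on the struct device, concurrent allocators must not observe each other's override, which is what priv->alloc_mutex serializes. A condensed sketch of the pattern (error handling elided):

/* Condensed sketch of the hunk's pattern: retarget the device's NUMA
 * node around the allocation; alloc_mutex serializes the override
 * because dev->numa_node is shared by every allocator on this device. */
static void *dma_zalloc_on_node(struct device *dev, struct mutex *alloc_mutex,
                                size_t size, dma_addr_t *handle, int node)
{
        int original_node;
        void *buf;

        mutex_lock(alloc_mutex);
        original_node = dev_to_node(dev);
        set_dev_node(dev, node);                /* allocate near 'node' */
        buf = dma_zalloc_coherent(dev, size, handle, GFP_KERNEL);
        set_dev_node(dev, original_node);       /* restore for other users */
        mutex_unlock(alloc_mutex);

        return buf;
}
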
index 3d23bd657e3c0cf7dc6d1d61c530a2d15053712a..45f6dc75c0df99c9d7a8f92c976bd014bdad24f1 100644 (file)
@@ -60,6 +60,7 @@
 
 #define MLX5E_TX_CQ_POLL_BUDGET        128
 #define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
+#define MLX5E_SQ_BF_BUDGET             16
 
 static const char vport_strings[][ETH_GSTRING_LEN] = {
        /* vport statistics */
@@ -195,6 +196,8 @@ struct mlx5e_params {
        u16 rx_hash_log_tbl_sz;
        bool lro_en;
        u32 lro_wqe_sz;
+       u8  rss_hfunc;
+       u16 tx_max_inline;
 };
 
 enum {
@@ -266,7 +269,9 @@ struct mlx5e_sq {
        /* dirtied @xmit */
        u16                        pc ____cacheline_aligned_in_smp;
        u32                        dma_fifo_pc;
-       u32                        bf_offset;
+       u16                        bf_offset;
+       u16                        prev_cc;
+       u8                         bf_budget;
        struct mlx5e_sq_stats      stats;
 
        struct mlx5e_cq            cq;
@@ -279,9 +284,10 @@ struct mlx5e_sq {
        struct mlx5_wq_cyc         wq;
        u32                        dma_fifo_mask;
        void __iomem              *uar_map;
+       void __iomem              *uar_bf_map;
        struct netdev_queue       *txq;
        u32                        sqn;
-       u32                        bf_buf_size;
+       u16                        bf_buf_size;
        u16                        max_inline;
        u16                        edge;
        struct device             *pdev;
@@ -324,14 +330,18 @@ struct mlx5e_channel {
 };
 
 enum mlx5e_traffic_types {
-       MLX5E_TT_IPV4_TCP = 0,
-       MLX5E_TT_IPV6_TCP = 1,
-       MLX5E_TT_IPV4_UDP = 2,
-       MLX5E_TT_IPV6_UDP = 3,
-       MLX5E_TT_IPV4     = 4,
-       MLX5E_TT_IPV6     = 5,
-       MLX5E_TT_ANY      = 6,
-       MLX5E_NUM_TT      = 7,
+       MLX5E_TT_IPV4_TCP,
+       MLX5E_TT_IPV6_TCP,
+       MLX5E_TT_IPV4_UDP,
+       MLX5E_TT_IPV6_UDP,
+       MLX5E_TT_IPV4_IPSEC_AH,
+       MLX5E_TT_IPV6_IPSEC_AH,
+       MLX5E_TT_IPV4_IPSEC_ESP,
+       MLX5E_TT_IPV6_IPSEC_ESP,
+       MLX5E_TT_IPV4,
+       MLX5E_TT_IPV6,
+       MLX5E_TT_ANY,
+       MLX5E_NUM_TT,
 };
 
 enum {
@@ -379,7 +389,6 @@ struct mlx5e_flow_table {
 
 struct mlx5e_priv {
        /* priv data path fields - start */
-       int                        num_tc;
        int                        default_vlan_prio;
        struct mlx5e_sq            **txq_to_sq_map;
        /* priv data path fields - end */
@@ -487,12 +496,12 @@ void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
-int mlx5e_update_priv_params(struct mlx5e_priv *priv,
-                            struct mlx5e_params *new_params);
 
 static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
-                                     struct mlx5e_tx_wqe *wqe)
+                                     struct mlx5e_tx_wqe *wqe, int bf_sz)
 {
+       u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
+
        /* ensure wqe is visible to device before updating doorbell record */
        dma_wmb();
 
@@ -503,9 +512,15 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
         */
        wmb();
 
-       mlx5_write64((__be32 *)&wqe->ctrl,
-                    sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset,
-                    NULL);
+       if (bf_sz) {
+               __iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz);
+
+               /* flush the write-combining mapped buffer */
+               wmb();
+
+       } else {
+               mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
+       }
 
        sq->bf_offset ^= sq->bf_buf_size;
 }
@@ -519,3 +534,4 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
 }
 
 extern const struct ethtool_ops mlx5e_ethtool_ops;
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
index 388938482ff99dabdd0f95229c738dfd915a0c7f..b95aa3384c367cda65fd6a875553cad0c8638d69 100644 (file)
@@ -173,7 +173,7 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
        case ETH_SS_STATS:
                return NUM_VPORT_COUNTERS +
                       priv->params.num_channels * NUM_RQ_STATS +
-                      priv->params.num_channels * priv->num_tc *
+                      priv->params.num_channels * priv->params.num_tc *
                                                   NUM_SQ_STATS;
        /* fallthrough */
        default:
@@ -207,7 +207,7 @@ static void mlx5e_get_strings(struct net_device *dev,
                                        "rx%d_%s", i, rq_stats_strings[j]);
 
                for (i = 0; i < priv->params.num_channels; i++)
-                       for (tc = 0; tc < priv->num_tc; tc++)
+                       for (tc = 0; tc < priv->params.num_tc; tc++)
                                for (j = 0; j < NUM_SQ_STATS; j++)
                                        sprintf(data +
                                                (idx++) * ETH_GSTRING_LEN,
@@ -242,7 +242,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
                                       ((u64 *)&priv->channel[i]->rq.stats)[j];
 
        for (i = 0; i < priv->params.num_channels; i++)
-               for (tc = 0; tc < priv->num_tc; tc++)
+               for (tc = 0; tc < priv->params.num_tc; tc++)
                        for (j = 0; j < NUM_SQ_STATS; j++)
                                data[idx++] = !test_bit(MLX5E_STATE_OPENED,
                                                        &priv->state) ? 0 :
@@ -264,7 +264,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
                               struct ethtool_ringparam *param)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
-       struct mlx5e_params new_params;
+       bool was_opened;
        u16 min_rx_wqes;
        u8 log_rq_size;
        u8 log_sq_size;
@@ -316,11 +316,18 @@ static int mlx5e_set_ringparam(struct net_device *dev,
                return 0;
 
        mutex_lock(&priv->state_lock);
-       new_params = priv->params;
-       new_params.log_rq_size = log_rq_size;
-       new_params.log_sq_size = log_sq_size;
-       new_params.min_rx_wqes = min_rx_wqes;
-       err = mlx5e_update_priv_params(priv, &new_params);
+
+       was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+       if (was_opened)
+               mlx5e_close_locked(dev);
+
+       priv->params.log_rq_size = log_rq_size;
+       priv->params.log_sq_size = log_sq_size;
+       priv->params.min_rx_wqes = min_rx_wqes;
+
+       if (was_opened)
+               err = mlx5e_open_locked(dev);
+
        mutex_unlock(&priv->state_lock);
 
        return err;
@@ -342,7 +349,7 @@ static int mlx5e_set_channels(struct net_device *dev,
        struct mlx5e_priv *priv = netdev_priv(dev);
        int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
        unsigned int count = ch->combined_count;
-       struct mlx5e_params new_params;
+       bool was_opened;
        int err = 0;
 
        if (!count) {
@@ -365,9 +372,16 @@ static int mlx5e_set_channels(struct net_device *dev,
                return 0;
 
        mutex_lock(&priv->state_lock);
-       new_params = priv->params;
-       new_params.num_channels = count;
-       err = mlx5e_update_priv_params(priv, &new_params);
+
+       was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+       if (was_opened)
+               mlx5e_close_locked(dev);
+
+       priv->params.num_channels = count;
+
+       if (was_opened)
+               err = mlx5e_open_locked(dev);
+
        mutex_unlock(&priv->state_lock);
 
        return err;
@@ -662,6 +676,101 @@ out:
        return err;
 }
 
+static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+                         u8 *hfunc)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+
+       if (hfunc)
+               *hfunc = priv->params.rss_hfunc;
+
+       return 0;
+}
+
+static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
+                         const u8 *key, const u8 hfunc)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       int err = 0;
+
+       if (hfunc == ETH_RSS_HASH_NO_CHANGE)
+               return 0;
+
+       if ((hfunc != ETH_RSS_HASH_XOR) &&
+           (hfunc != ETH_RSS_HASH_TOP))
+               return -EINVAL;
+
+       mutex_lock(&priv->state_lock);
+
+       priv->params.rss_hfunc = hfunc;
+       if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+               mlx5e_close_locked(dev);
+               err = mlx5e_open_locked(dev);
+       }
+
+       mutex_unlock(&priv->state_lock);
+
+       return err;
+}
+
+static int mlx5e_get_tunable(struct net_device *dev,
+                            const struct ethtool_tunable *tuna,
+                            void *data)
+{
+       const struct mlx5e_priv *priv = netdev_priv(dev);
+       int err = 0;
+
+       switch (tuna->id) {
+       case ETHTOOL_TX_COPYBREAK:
+               *(u32 *)data = priv->params.tx_max_inline;
+               break;
+       default:
+               err = -EINVAL;
+               break;
+       }
+
+       return err;
+}
+
+static int mlx5e_set_tunable(struct net_device *dev,
+                            const struct ethtool_tunable *tuna,
+                            const void *data)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       bool was_opened;
+       u32 val;
+       int err = 0;
+
+       switch (tuna->id) {
+       case ETHTOOL_TX_COPYBREAK:
+               val = *(u32 *)data;
+               if (val > mlx5e_get_max_inline_cap(mdev)) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               mutex_lock(&priv->state_lock);
+
+               was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+               if (was_opened)
+                       mlx5e_close_locked(dev);
+
+               priv->params.tx_max_inline = val;
+
+               if (was_opened)
+                       err = mlx5e_open_locked(dev);
+
+               mutex_unlock(&priv->state_lock);
+               break;
+       default:
+               err = -EINVAL;
+               break;
+       }
+
+       return err;
+}
+
 const struct ethtool_ops mlx5e_ethtool_ops = {
        .get_drvinfo       = mlx5e_get_drvinfo,
        .get_link          = ethtool_op_get_link,
@@ -676,4 +785,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
        .set_coalesce      = mlx5e_set_coalesce,
        .get_settings      = mlx5e_get_settings,
        .set_settings      = mlx5e_set_settings,
+       .get_rxfh          = mlx5e_get_rxfh,
+       .set_rxfh          = mlx5e_set_rxfh,
+       .get_tunable       = mlx5e_get_tunable,
+       .set_tunable       = mlx5e_set_tunable,
 };
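
The new get_tunable/set_tunable hooks reuse the generic ETHTOOL_TX_COPYBREAK tunable to expose tx_max_inline. A user-space sketch of querying it over the classic SIOCETHTOOL ioctl; this assumes UAPI headers new enough to define the tunable constants (v3.18+), and the interface name is a placeholder:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	/* header followed by one u32 of payload, as the kernel expects */
	char buf[sizeof(struct ethtool_tunable) + sizeof(uint32_t)];
	struct ethtool_tunable *tuna = (struct ethtool_tunable *)buf;
	uint32_t *val = (uint32_t *)(tuna + 1);
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(buf, 0, sizeof(buf));
	memset(&ifr, 0, sizeof(ifr));
	tuna->cmd = ETHTOOL_GTUNABLE;
	tuna->id = ETHTOOL_TX_COPYBREAK;
	tuna->type_id = ETHTOOL_TUNABLE_U32;
	tuna->len = sizeof(*val);

	snprintf(ifr.ifr_name, IFNAMSIZ, "%s", argc > 1 ? argv[1] : "eth0");
	ifr.ifr_data = (void *)tuna;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("tx-copybreak = %u\n", *val);
	else
		perror("SIOCETHTOOL");

	close(fd);
	return 0;
}

Recent ethtool binaries expose the same operation as `ethtool --get-tunable <dev> tx-copybreak` and `--set-tunable`.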
index 120db80c47aac425bf3c9d2eaac8b6ccd4cd674b..70ec31b9e1e96b135829430df45a416444a21a11 100644
@@ -105,25 +105,41 @@ static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
 {
        void *ft = priv->ft.main;
 
-       if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
+               mlx5_del_flow_table_entry(ft,
+                                         ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]);
+
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
+               mlx5_del_flow_table_entry(ft,
+                                         ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]);
+
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
+               mlx5_del_flow_table_entry(ft,
+                                         ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]);
+
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
+               mlx5_del_flow_table_entry(ft,
+                                         ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]);
+
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
                mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
 
-       if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
                mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
 
-       if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
                mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
 
-       if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
                mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
 
-       if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
                mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
 
-       if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
                mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
 
-       if (ai->tt_vec & (1 << MLX5E_TT_ANY))
+       if (ai->tt_vec & BIT(MLX5E_TT_ANY))
                mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
 }
 
@@ -156,33 +172,37 @@ static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
                switch (eth_addr_type) {
                case MLX5E_UC:
                        ret =
-                               (1 << MLX5E_TT_IPV4_TCP) |
-                               (1 << MLX5E_TT_IPV6_TCP) |
-                               (1 << MLX5E_TT_IPV4_UDP) |
-                               (1 << MLX5E_TT_IPV6_UDP) |
-                               (1 << MLX5E_TT_IPV4)     |
-                               (1 << MLX5E_TT_IPV6)     |
-                               (1 << MLX5E_TT_ANY)      |
+                               BIT(MLX5E_TT_IPV4_TCP)       |
+                               BIT(MLX5E_TT_IPV6_TCP)       |
+                               BIT(MLX5E_TT_IPV4_UDP)       |
+                               BIT(MLX5E_TT_IPV6_UDP)       |
+                               BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
+                               BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
+                               BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
+                               BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
+                               BIT(MLX5E_TT_IPV4)           |
+                               BIT(MLX5E_TT_IPV6)           |
+                               BIT(MLX5E_TT_ANY)            |
                                0;
                        break;
 
                case MLX5E_MC_IPV4:
                        ret =
-                               (1 << MLX5E_TT_IPV4_UDP) |
-                               (1 << MLX5E_TT_IPV4)     |
+                               BIT(MLX5E_TT_IPV4_UDP)       |
+                               BIT(MLX5E_TT_IPV4)           |
                                0;
                        break;
 
                case MLX5E_MC_IPV6:
                        ret =
-                               (1 << MLX5E_TT_IPV6_UDP) |
-                               (1 << MLX5E_TT_IPV6)     |
+                               BIT(MLX5E_TT_IPV6_UDP)       |
+                               BIT(MLX5E_TT_IPV6)           |
                                0;
                        break;
 
                case MLX5E_MC_OTHER:
                        ret =
-                               (1 << MLX5E_TT_ANY)      |
+                               BIT(MLX5E_TT_ANY)            |
                                0;
                        break;
                }
@@ -191,23 +211,27 @@ static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
 
        case MLX5E_ALLMULTI:
                ret =
-                       (1 << MLX5E_TT_IPV4_UDP) |
-                       (1 << MLX5E_TT_IPV6_UDP) |
-                       (1 << MLX5E_TT_IPV4)     |
-                       (1 << MLX5E_TT_IPV6)     |
-                       (1 << MLX5E_TT_ANY)      |
+                       BIT(MLX5E_TT_IPV4_UDP) |
+                       BIT(MLX5E_TT_IPV6_UDP) |
+                       BIT(MLX5E_TT_IPV4)     |
+                       BIT(MLX5E_TT_IPV6)     |
+                       BIT(MLX5E_TT_ANY)      |
                        0;
                break;
 
        default: /* MLX5E_PROMISC */
                ret =
-                       (1 << MLX5E_TT_IPV4_TCP) |
-                       (1 << MLX5E_TT_IPV6_TCP) |
-                       (1 << MLX5E_TT_IPV4_UDP) |
-                       (1 << MLX5E_TT_IPV6_UDP) |
-                       (1 << MLX5E_TT_IPV4)     |
-                       (1 << MLX5E_TT_IPV6)     |
-                       (1 << MLX5E_TT_ANY)      |
+                       BIT(MLX5E_TT_IPV4_TCP)       |
+                       BIT(MLX5E_TT_IPV6_TCP)       |
+                       BIT(MLX5E_TT_IPV4_UDP)       |
+                       BIT(MLX5E_TT_IPV6_UDP)       |
+                       BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
+                       BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
+                       BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
+                       BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
+                       BIT(MLX5E_TT_IPV4)           |
+                       BIT(MLX5E_TT_IPV6)           |
+                       BIT(MLX5E_TT_ANY)            |
                        0;
                break;
        }
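
BIT(nr) from the kernel's bitops header expands to (1UL << (nr)), so the conversion in these hunks is behavior-preserving for these small enum values while widening the constants to unsigned long and making the bitmap intent explicit. A stand-alone check with illustrative enum values:

#include <assert.h>
#include <stdio.h>

#define BIT(nr) (1UL << (nr))           /* as in include/linux/bitops.h */

enum { TT_IPV4_TCP, TT_IPV6_TCP, TT_IPV4_UDP, TT_ANY }; /* illustrative */

int main(void)
{
	unsigned long vec = BIT(TT_IPV4_TCP) | BIT(TT_ANY);

	/* identical to the old (1 << x) form for these bit positions */
	assert(vec == ((1UL << TT_IPV4_TCP) | (1UL << TT_ANY)));
	printf("vec=0x%lx TT_ANY set: %d\n", vec, !!(vec & BIT(TT_ANY)));
	return 0;
}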
@@ -226,6 +250,7 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
        u8   *match_criteria_dmac;
        void *ft   = priv->ft.main;
        u32  *tirn = priv->tirn;
+       u32  *ft_ix;
        u32  tt_vec;
        int  err;
 
@@ -261,51 +286,51 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
 
        tt_vec = mlx5e_get_tt_vec(ai, type);
 
-       if (tt_vec & (1 << MLX5E_TT_ANY)) {
+       ft_ix = &ai->ft_ix[MLX5E_TT_ANY];
+       if (tt_vec & BIT(MLX5E_TT_ANY)) {
                MLX5_SET(dest_format_struct, dest, destination_id,
                         tirn[MLX5E_TT_ANY]);
                err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
                                                match_criteria, flow_context,
-                                               &ai->ft_ix[MLX5E_TT_ANY]);
-               if (err) {
-                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
-                       return err;
-               }
-               ai->tt_vec |= (1 << MLX5E_TT_ANY);
+                                               ft_ix);
+               if (err)
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_ANY);
        }
 
        match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        MLX5_SET_TO_ONES(fte_match_param, match_criteria,
                         outer_headers.ethertype);
 
-       if (tt_vec & (1 << MLX5E_TT_IPV4)) {
+       ft_ix = &ai->ft_ix[MLX5E_TT_IPV4];
+       if (tt_vec & BIT(MLX5E_TT_IPV4)) {
                MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
                         ETH_P_IP);
                MLX5_SET(dest_format_struct, dest, destination_id,
                         tirn[MLX5E_TT_IPV4]);
                err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
                                                match_criteria, flow_context,
-                                               &ai->ft_ix[MLX5E_TT_IPV4]);
-               if (err) {
-                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
-                       return err;
-               }
-               ai->tt_vec |= (1 << MLX5E_TT_IPV4);
+                                               ft_ix);
+               if (err)
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_IPV4);
        }
 
-       if (tt_vec & (1 << MLX5E_TT_IPV6)) {
+       ft_ix = &ai->ft_ix[MLX5E_TT_IPV6];
+       if (tt_vec & BIT(MLX5E_TT_IPV6)) {
                MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
                         ETH_P_IPV6);
                MLX5_SET(dest_format_struct, dest, destination_id,
                         tirn[MLX5E_TT_IPV6]);
                err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
                                                match_criteria, flow_context,
-                                               &ai->ft_ix[MLX5E_TT_IPV6]);
-               if (err) {
-                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
-                       return err;
-               }
-               ai->tt_vec |= (1 << MLX5E_TT_IPV6);
+                                               ft_ix);
+               if (err)
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_IPV6);
        }
 
        MLX5_SET_TO_ONES(fte_match_param, match_criteria,
@@ -313,70 +338,141 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
        MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
                 IPPROTO_UDP);
 
-       if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
+       ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP];
+       if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
                MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
                         ETH_P_IP);
                MLX5_SET(dest_format_struct, dest, destination_id,
                         tirn[MLX5E_TT_IPV4_UDP]);
                err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
                                                match_criteria, flow_context,
-                                               &ai->ft_ix[MLX5E_TT_IPV4_UDP]);
-               if (err) {
-                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
-                       return err;
-               }
-               ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
+                                               ft_ix);
+               if (err)
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
        }
 
-       if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
+       ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP];
+       if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
                MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
                         ETH_P_IPV6);
                MLX5_SET(dest_format_struct, dest, destination_id,
                         tirn[MLX5E_TT_IPV6_UDP]);
                err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
                                                match_criteria, flow_context,
-                                               &ai->ft_ix[MLX5E_TT_IPV6_UDP]);
-               if (err) {
-                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
-                       return err;
-               }
-               ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
+                                               ft_ix);
+               if (err)
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
        }
 
        MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
                 IPPROTO_TCP);
 
-       if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
+       ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP];
+       if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
                MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
                         ETH_P_IP);
                MLX5_SET(dest_format_struct, dest, destination_id,
                         tirn[MLX5E_TT_IPV4_TCP]);
                err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
                                                match_criteria, flow_context,
-                                               &ai->ft_ix[MLX5E_TT_IPV4_TCP]);
-               if (err) {
-                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
-                       return err;
-               }
-               ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
+                                               ft_ix);
+               if (err)
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
        }
 
-       if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
+       ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP];
+       if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
                MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
                         ETH_P_IPV6);
                MLX5_SET(dest_format_struct, dest, destination_id,
                         tirn[MLX5E_TT_IPV6_TCP]);
                err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
                                                match_criteria, flow_context,
-                                               &ai->ft_ix[MLX5E_TT_IPV6_TCP]);
-               if (err) {
-                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
-                       return err;
-               }
-               ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
+                                               ft_ix);
+               if (err)
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
+       }
+
+       MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+                IPPROTO_AH);
+
+       ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH];
+       if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
+               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+                        ETH_P_IP);
+               MLX5_SET(dest_format_struct, dest, destination_id,
+                        tirn[MLX5E_TT_IPV4_IPSEC_AH]);
+               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+                                               match_criteria, flow_context,
+                                               ft_ix);
+               if (err)
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
+       }
+
+       ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH];
+       if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
+               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+                        ETH_P_IPV6);
+               MLX5_SET(dest_format_struct, dest, destination_id,
+                        tirn[MLX5E_TT_IPV6_IPSEC_AH]);
+               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+                                               match_criteria, flow_context,
+                                               ft_ix);
+               if (err)
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
+       }
+
+       MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+                IPPROTO_ESP);
+
+       ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP];
+       if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
+               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+                        ETH_P_IP);
+               MLX5_SET(dest_format_struct, dest, destination_id,
+                        tirn[MLX5E_TT_IPV4_IPSEC_ESP]);
+               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+                                               match_criteria, flow_context,
+                                               ft_ix);
+               if (err)
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
+       }
+
+       ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP];
+       if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
+               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+                        ETH_P_IPV6);
+               MLX5_SET(dest_format_struct, dest, destination_id,
+                        tirn[MLX5E_TT_IPV6_IPSEC_ESP]);
+               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+                                               match_criteria, flow_context,
+                                               ft_ix);
+               if (err)
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
        }
 
        return 0;
+
+err_del_ai:
+       mlx5e_del_eth_addr_from_flow_table(priv, ai);
+
+       return err;
 }
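
Each rule-install branch now shares one err_del_ai exit that tears down every entry recorded in ai->tt_vec so far; this is the usual kernel goto-unwind idiom, and it keeps the function compact enough to add the four IPsec branches without duplicating cleanup. The shape of the pattern in isolation (hypothetical names, a loop standing in for the unrolled branches):

#include <stdio.h>

/* add_all() tries a series of steps; on the first failure it jumps to
 * one label that undoes everything added so far, instead of repeating
 * the cleanup call at every site.  fail_at simulates a failing step.
 */
static int add_all(int fail_at)
{
	unsigned int added = 0;         /* stands in for ai->tt_vec */
	int err = 0;
	int i;

	for (i = 0; i < 4; i++) {
		if (i == fail_at) {     /* the add-entry call failed */
			err = -1;
			goto err_del_all;
		}
		added |= 1u << i;       /* record what must be undone */
	}

	return 0;

err_del_all:
	printf("unwinding entries 0x%x\n", added);  /* one cleanup site */
	return err;
}

int main(void)
{
	printf("err=%d\n", add_all(2));
	return 0;
}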
 
 static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
@@ -725,7 +821,7 @@ static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
        if (!g)
                return -ENOMEM;
 
-       g[0].log_sz = 2;
+       g[0].log_sz = 3;
        g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
                         outer_headers.ethertype);
index 40206da1f9d7b9e24ebd972e2b798f05dc4be46b..bb815893d3a8b40cb22f0a938b14487b028a9f56 100644
@@ -41,6 +41,7 @@ struct mlx5e_rq_param {
 struct mlx5e_sq_param {
        u32                        sqc[MLX5_ST_SZ_DW(sqc)];
        struct mlx5_wq_param       wq;
+       u16                        max_inline;
 };
 
 struct mlx5e_cq_param {
@@ -116,7 +117,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_wqe_err   += rq_stats->wqe_err;
 
-               for (j = 0; j < priv->num_tc; j++) {
+               for (j = 0; j < priv->params.num_tc; j++) {
                        sq_stats = &priv->channel[i]->sq[j].stats;
 
                        s->tso_packets          += sq_stats->tso_packets;
@@ -272,6 +273,8 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
        int err;
        int i;
 
+       param->wq.db_numa_node = cpu_to_node(c->cpu);
+
        err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
        if (err)
@@ -342,11 +345,11 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
 
        memcpy(rqc, param->rqc, sizeof(param->rqc));
 
-       MLX5_SET(rqc,  rqc, cqn,                c->rq.cq.mcq.cqn);
+       MLX5_SET(rqc,  rqc, cqn,                rq->cq.mcq.cqn);
        MLX5_SET(rqc,  rqc, state,              MLX5_RQC_STATE_RST);
        MLX5_SET(rqc,  rqc, flush_in_error_en,  1);
        MLX5_SET(wq,   wq,  log_wq_pg_sz,       rq->wq_ctrl.buf.page_shift -
-                                               PAGE_SHIFT);
+                                               MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq,  dbr_addr,           rq->wq_ctrl.db.dma);
 
        mlx5_fill_page_array(&rq->wq_ctrl.buf,
@@ -502,6 +505,8 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
        if (err)
                return err;
 
+       param->wq.db_numa_node = cpu_to_node(c->cpu);
+
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
                                 &sq->wq_ctrl);
        if (err)
@@ -509,7 +514,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 
        sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
        sq->uar_map     = sq->uar.map;
+       sq->uar_bf_map  = sq->uar.bf_map;
        sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+       sq->max_inline  = param->max_inline;
 
        err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
        if (err)
@@ -518,11 +525,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
        txq_ix = c->ix + tc * priv->params.num_channels;
        sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
 
-       sq->pdev    = c->pdev;
-       sq->mkey_be = c->mkey_be;
-       sq->channel = c;
-       sq->tc      = tc;
-       sq->edge    = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+       sq->pdev      = c->pdev;
+       sq->mkey_be   = c->mkey_be;
+       sq->channel   = c;
+       sq->tc        = tc;
+       sq->edge      = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+       sq->bf_budget = MLX5E_SQ_BF_BUDGET;
        priv->txq_to_sq_map[txq_ix] = sq;
 
        return 0;
@@ -569,7 +577,6 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
 
        memcpy(sqc, param->sqc, sizeof(param->sqc));
 
-       MLX5_SET(sqc,  sqc, user_index,         sq->tc);
        MLX5_SET(sqc,  sqc, tis_num_0,          priv->tisn[sq->tc]);
        MLX5_SET(sqc,  sqc, cqn,                c->sq[sq->tc].cq.mcq.cqn);
        MLX5_SET(sqc,  sqc, state,              MLX5_SQC_STATE_RST);
@@ -579,7 +586,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
        MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
        MLX5_SET(wq,   wq, uar_page,      sq->uar.index);
        MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
-                                         PAGE_SHIFT);
+                                         MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);
 
        mlx5_fill_page_array(&sq->wq_ctrl.buf,
@@ -702,7 +709,8 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
        int err;
        u32 i;
 
-       param->wq.numa = cpu_to_node(c->cpu);
+       param->wq.buf_numa_node = cpu_to_node(c->cpu);
+       param->wq.db_numa_node  = cpu_to_node(c->cpu);
        param->eq_ix   = c->ix;
 
        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
@@ -773,7 +781,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
        MLX5_SET(cqc,   cqc, c_eqn,         eqn);
        MLX5_SET(cqc,   cqc, uar_page,      mcq->uar->index);
        MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
-                                           PAGE_SHIFT);
+                                           MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);
 
        err = mlx5_core_create_cq(mdev, mcq, in, inlen);
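
This and the matching RQ/SQ hunks earlier replace PAGE_SHIFT with MLX5_ADAPTER_PAGE_SHIFT when programming log page sizes: the HCA counts pages in fixed 4 KiB units, so on kernels with larger pages (64 KiB arm64/ppc64) subtracting the host PAGE_SHIFT under-reported the buffer page size. Quick arithmetic check, with a 64 KiB host page assumed for illustration:

#include <stdio.h>

#define MLX5_ADAPTER_PAGE_SHIFT 12      /* device pages are always 4 KiB */

int main(void)
{
	int host_page_shift = 16;       /* e.g. 64 KiB pages on arm64 */
	int buf_page_shift = 16;        /* buffer allocated as one host page */

	/* old: relative to the host page, yields 0, i.e. "4 KiB" to the HCA */
	printf("old field value: %d\n", buf_page_shift - host_page_shift);

	/* new: relative to the device's 4 KiB unit, yields 4, i.e. 64 KiB */
	printf("new field value: %d\n",
	       buf_page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	return 0;
}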
@@ -929,7 +937,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
        c->pdev     = &priv->mdev->pdev->dev;
        c->netdev   = priv->netdev;
        c->mkey_be  = cpu_to_be32(priv->mr.key);
-       c->num_tc   = priv->num_tc;
+       c->num_tc   = priv->params.num_tc;
 
        mlx5e_build_tc_to_txq_map(c, priv->params.num_channels);
 
@@ -1000,7 +1008,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
        MLX5_SET(wq, wq, log_wq_sz,        priv->params.log_rq_size);
        MLX5_SET(wq, wq, pd,               priv->pdn);
 
-       param->wq.numa   = dev_to_node(&priv->mdev->pdev->dev);
+       param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
        param->wq.linear = 1;
 }
 
@@ -1014,7 +1022,8 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
        MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
        MLX5_SET(wq, wq, pd,            priv->pdn);
 
-       param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+       param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+       param->max_inline = priv->params.tx_max_inline;
 }
 
 static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -1059,27 +1068,28 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
 static int mlx5e_open_channels(struct mlx5e_priv *priv)
 {
        struct mlx5e_channel_param cparam;
+       int nch = priv->params.num_channels;
        int err = -ENOMEM;
        int i;
        int j;
 
-       priv->channel = kcalloc(priv->params.num_channels,
-                               sizeof(struct mlx5e_channel *), GFP_KERNEL);
+       priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
+                               GFP_KERNEL);
 
-       priv->txq_to_sq_map = kcalloc(priv->params.num_channels * priv->num_tc,
+       priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
                                      sizeof(struct mlx5e_sq *), GFP_KERNEL);
 
        if (!priv->channel || !priv->txq_to_sq_map)
                goto err_free_txq_to_sq_map;
 
        mlx5e_build_channel_param(priv, &cparam);
-       for (i = 0; i < priv->params.num_channels; i++) {
+       for (i = 0; i < nch; i++) {
                err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
                if (err)
                        goto err_close_channels;
        }
 
-       for (j = 0; j < priv->params.num_channels; j++) {
+       for (j = 0; j < nch; j++) {
                err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
                if (err)
                        goto err_close_channels;
@@ -1130,11 +1140,10 @@ static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
 
 static int mlx5e_open_tises(struct mlx5e_priv *priv)
 {
-       int num_tc = priv->num_tc;
        int err;
        int tc;
 
-       for (tc = 0; tc < num_tc; tc++) {
+       for (tc = 0; tc < priv->params.num_tc; tc++) {
                err = mlx5e_open_tis(priv, tc);
                if (err)
                        goto err_close_tises;
@@ -1151,26 +1160,41 @@ err_close_tises:
 
 static void mlx5e_close_tises(struct mlx5e_priv *priv)
 {
-       int num_tc = priv->num_tc;
        int tc;
 
-       for (tc = 0; tc < num_tc; tc++)
+       for (tc = 0; tc < priv->params.num_tc; tc++)
                mlx5e_close_tis(priv, tc);
 }
 
+static int mlx5e_rx_hash_fn(int hfunc)
+{
+       return (hfunc == ETH_RSS_HASH_TOP) ?
+              MLX5_RX_HASH_FN_TOEPLITZ :
+              MLX5_RX_HASH_FN_INVERTED_XOR8;
+}
+
+static int mlx5e_bits_invert(unsigned long a, int size)
+{
+       int inv = 0;
+       int i;
+
+       for (i = 0; i < size; i++)
+               inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
+
+       return inv;
+}
+
 static int mlx5e_open_rqt(struct mlx5e_priv *priv)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 *in;
-       u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
        void *rqtc;
        int inlen;
        int err;
-       int sz;
+       int log_tbl_sz = priv->params.rx_hash_log_tbl_sz;
+       int sz = 1 << log_tbl_sz;
        int i;
 
-       sz = 1 << priv->params.rx_hash_log_tbl_sz;
-
        inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
        if (!in)
@@ -1182,17 +1206,16 @@ static int mlx5e_open_rqt(struct mlx5e_priv *priv)
        MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
 
        for (i = 0; i < sz; i++) {
-               int ix = i % priv->params.num_channels;
+               int ix = i;
 
+               if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
+                       ix = mlx5e_bits_invert(i, log_tbl_sz);
+
+               ix = ix % priv->params.num_channels;
                MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
        }
 
-       MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
-
-       memset(out, 0, sizeof(out));
-       err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
-       if (!err)
-               priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+       err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn);
 
        kvfree(in);
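
With the XOR hash function the indirection table is now filled in bit-reversed slot order before the modulo over num_channels, which spreads consecutive table slots evenly across channels. A user-space replica of mlx5e_bits_invert() plus the fill loop (table size and channel count illustrative; the kernel version uses test_bit(), and a shift-and-mask replica behaves identically):

#include <stdio.h>

/* Reverse the low `size` bits of `a`. */
static int bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= ((a >> (size - i - 1)) & 1UL) << i;

	return inv;
}

int main(void)
{
	int log_tbl_sz = 3;             /* 8 slots, for illustration */
	int num_channels = 3;
	int i;

	for (i = 0; i < (1 << log_tbl_sz); i++) {
		int ix = bits_invert(i, log_tbl_sz) % num_channels;

		printf("slot %d -> channel %d\n", i, ix);
	}
	return 0;
}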
 
@@ -1201,16 +1224,7 @@ static int mlx5e_open_rqt(struct mlx5e_priv *priv)
 
 static void mlx5e_close_rqt(struct mlx5e_priv *priv)
 {
-       u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
-       u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
-
-       memset(in, 0, sizeof(in));
-
-       MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
-       MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
-
-       mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
-                                  sizeof(out));
+       mlx5_core_destroy_rqt(priv->mdev, priv->rqtn);
 }
 
 static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
@@ -1221,13 +1235,17 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
 
 #define ROUGH_MAX_L2_L3_HDR_SZ 256
 
-#define MLX5_HASH_IP     (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-                         MLX5_HASH_FIELD_SEL_DST_IP)
+#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                                MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                                MLX5_HASH_FIELD_SEL_DST_IP   |\
+                                MLX5_HASH_FIELD_SEL_L4_SPORT |\
+                                MLX5_HASH_FIELD_SEL_L4_DPORT)
 
-#define MLX5_HASH_ALL    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-                         MLX5_HASH_FIELD_SEL_DST_IP   |\
-                         MLX5_HASH_FIELD_SEL_L4_SPORT |\
-                         MLX5_HASH_FIELD_SEL_L4_DPORT)
+#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                                MLX5_HASH_FIELD_SEL_DST_IP   |\
+                                MLX5_HASH_FIELD_SEL_IPSEC_SPI)
 
        if (priv->params.lro_en) {
                MLX5_SET(tirc, tirc, lro_enable_mask,
@@ -1254,12 +1272,16 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
                MLX5_SET(tirc, tirc, indirect_table,
                         priv->rqtn);
                MLX5_SET(tirc, tirc, rx_hash_fn,
-                        MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
-               MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
-               netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc,
-                                                rx_hash_toeplitz_key),
-                                   MLX5_FLD_SZ_BYTES(tirc,
-                                                     rx_hash_toeplitz_key));
+                        mlx5e_rx_hash_fn(priv->params.rss_hfunc));
+               if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
+                       void *rss_key = MLX5_ADDR_OF(tirc, tirc,
+                                                    rx_hash_toeplitz_key);
+                       size_t len = MLX5_FLD_SZ_BYTES(tirc,
+                                                      rx_hash_toeplitz_key);
+
+                       MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+                       netdev_rss_key_fill(rss_key, len);
+               }
                break;
        }
 
@@ -1270,7 +1292,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_TCP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_ALL);
+                        MLX5_HASH_IP_L4PORTS);
                break;
 
        case MLX5E_TT_IPV6_TCP:
@@ -1279,7 +1301,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_TCP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_ALL);
+                        MLX5_HASH_IP_L4PORTS);
                break;
 
        case MLX5E_TT_IPV4_UDP:
@@ -1288,7 +1310,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_UDP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_ALL);
+                        MLX5_HASH_IP_L4PORTS);
                break;
 
        case MLX5E_TT_IPV6_UDP:
@@ -1297,7 +1319,35 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_UDP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_ALL);
+                        MLX5_HASH_IP_L4PORTS);
+               break;
+
+       case MLX5E_TT_IPV4_IPSEC_AH:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
+               break;
+
+       case MLX5E_TT_IPV6_IPSEC_AH:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
+               break;
+
+       case MLX5E_TT_IPV4_IPSEC_ESP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
+               break;
+
+       case MLX5E_TT_IPV6_IPSEC_ESP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
                break;
 
        case MLX5E_TT_IPV4:
@@ -1520,26 +1570,6 @@ static int mlx5e_close(struct net_device *netdev)
        return err;
 }
 
-int mlx5e_update_priv_params(struct mlx5e_priv *priv,
-                            struct mlx5e_params *new_params)
-{
-       int err = 0;
-       int was_opened;
-
-       WARN_ON(!mutex_is_locked(&priv->state_lock));
-
-       was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-       if (was_opened)
-               mlx5e_close_locked(priv->netdev);
-
-       priv->params = *new_params;
-
-       if (was_opened)
-               err = mlx5e_open_locked(priv->netdev);
-
-       return err;
-}
-
 static struct rtnl_link_stats64 *
 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
@@ -1589,20 +1619,22 @@ static int mlx5e_set_features(struct net_device *netdev,
                              netdev_features_t features)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
+       int err = 0;
        netdev_features_t changes = features ^ netdev->features;
-       struct mlx5e_params new_params;
-       bool update_params = false;
 
        mutex_lock(&priv->state_lock);
-       new_params = priv->params;
 
        if (changes & NETIF_F_LRO) {
-               new_params.lro_en = !!(features & NETIF_F_LRO);
-               update_params = true;
-       }
+               bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+
+               if (was_opened)
+                       mlx5e_close_locked(priv->netdev);
 
-       if (update_params)
-               mlx5e_update_priv_params(priv, &new_params);
+               priv->params.lro_en = !!(features & NETIF_F_LRO);
+
+               if (was_opened)
+                       err = mlx5e_open_locked(priv->netdev);
+       }
 
        if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
                if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
@@ -1620,8 +1652,9 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
+       bool was_opened;
        int max_mtu;
-       int err;
+       int err = 0;
 
        mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 
@@ -1633,8 +1666,16 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
        }
 
        mutex_lock(&priv->state_lock);
+
+       was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+       if (was_opened)
+               mlx5e_close_locked(netdev);
+
        netdev->mtu = new_mtu;
-       err = mlx5e_update_priv_params(priv, &priv->params);
+
+       if (was_opened)
+               err = mlx5e_open_locked(netdev);
+
        mutex_unlock(&priv->state_lock);
 
        return err;
@@ -1673,6 +1714,15 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
        return 0;
 }
 
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
+{
+       int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+
+       return bf_buf_size -
+              sizeof(struct mlx5e_tx_wqe) +
+              2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
+}
+
 static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
                                    struct net_device *netdev,
                                    int num_comp_vectors)
@@ -1691,6 +1741,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
                MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
        priv->params.tx_cq_moderation_pkts =
                MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+       priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
        priv->params.min_rx_wqes           =
                MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
        priv->params.rx_hash_log_tbl_sz    =
@@ -1700,6 +1751,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
                MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
        priv->params.num_tc                = 1;
        priv->params.default_vlan_prio     = 0;
+       priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;
 
        priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
        priv->params.lro_wqe_sz            =
@@ -1708,7 +1760,6 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
        priv->mdev                         = mdev;
        priv->netdev                       = netdev;
        priv->params.num_channels          = num_comp_vectors;
-       priv->num_tc                       = priv->params.num_tc;
        priv->default_vlan_prio            = priv->params.default_vlan_prio;
 
        spin_lock_init(&priv->async_events_spinlock);
@@ -1733,9 +1784,8 @@ static void mlx5e_build_netdev(struct net_device *netdev)
 
        SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
 
-       if (priv->num_tc > 1) {
+       if (priv->params.num_tc > 1)
                mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
-       }
 
        netdev->netdev_ops        = &mlx5e_netdev_ops;
        netdev->watchdog_timeo    = 15 * HZ;
@@ -1819,36 +1869,31 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 
        err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
        if (err) {
-               netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n",
-                          __func__, err);
+               mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
                goto err_free_netdev;
        }
 
        err = mlx5_core_alloc_pd(mdev, &priv->pdn);
        if (err) {
-               netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n",
-                          __func__, err);
+               mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
                goto err_unmap_free_uar;
        }
 
        err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
        if (err) {
-               netdev_err(netdev, "%s: mlx5_alloc_transport_domain failed, %d\n",
-                          __func__, err);
+               mlx5_core_err(mdev, "alloc td failed, %d\n", err);
                goto err_dealloc_pd;
        }
 
        err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
        if (err) {
-               netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n",
-                          __func__, err);
+               mlx5_core_err(mdev, "create mkey failed, %d\n", err);
                goto err_dealloc_transport_domain;
        }
 
        err = register_netdev(netdev);
        if (err) {
-               netdev_err(netdev, "%s: register_netdev failed, %d\n",
-                          __func__, err);
+               mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
                goto err_destroy_mkey;
        }
 
index 03f28f438e55ab690cc865b6bc3509a53d280e25..64380bc0cd6a5df34b99531c0565718a4d2b207c 100644
@@ -57,7 +57,7 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
 
        if (notify_hw) {
                cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-               mlx5e_tx_notify_hw(sq, wqe);
+               mlx5e_tx_notify_hw(sq, wqe, 0);
        }
 }
 
@@ -110,9 +110,17 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 }
 
 static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
-                                           struct sk_buff *skb)
+                                           struct sk_buff *skb, bool bf)
 {
-#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
+       /* Some NIC TX decisions, e.g. loopback, are based on the packet
+        * headers and occur before the data gather.
+        * Therefore these headers must be copied into the WQE.
+        */
+#define MLX5E_MIN_INLINE (ETH_HLEN + 2/*vlan tag*/)
+
+       if (bf && (skb_headlen(skb) <= sq->max_inline))
+               return skb_headlen(skb);
+
        return MLX5E_MIN_INLINE;
 }
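
mlx5e_get_inline_hdr_size() now inlines the whole linear part of the skb into the WQE when a blue-flame send is possible and the headers fit within sq->max_inline, falling back to the bare Ethernet-plus-VLAN minimum otherwise. A stand-alone sketch of that decision (ETH_HLEN and the minimum mirror the hunk; everything else is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN   14
#define MIN_INLINE (ETH_HLEN + 2)       /* Ethernet header + VLAN tag */

static unsigned int inline_hdr_size(unsigned int headlen,
				    unsigned int max_inline, bool bf)
{
	if (bf && headlen <= max_inline)
		return headlen;         /* whole linear headers ride in the WQE */

	return MIN_INLINE;              /* just enough for NIC-side TX decisions */
}

int main(void)
{
	printf("bf:  %u\n", inline_hdr_size(54, 128, true));   /* 54 */
	printf("!bf: %u\n", inline_hdr_size(54, 128, false));  /* 16 */
	return 0;
}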
 
@@ -129,6 +137,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 
        u8  opcode = MLX5_OPCODE_SEND;
        dma_addr_t dma_addr = 0;
+       bool bf = false;
        u16 headlen;
        u16 ds_cnt;
        u16 ihs;
@@ -141,6 +150,11 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
        else
                sq->stats.csum_offload_none++;
 
+       if (sq->cc != sq->prev_cc) {
+               sq->prev_cc = sq->cc;
+               sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
+       }
+
        if (skb_is_gso(skb)) {
                u32 payload_len;
 
@@ -153,7 +167,10 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
                sq->stats.tso_packets++;
                sq->stats.tso_bytes += payload_len;
        } else {
-               ihs = mlx5e_get_inline_hdr_size(sq, skb);
+               bf = sq->bf_budget &&
+                    !skb->xmit_more &&
+                    !skb_shinfo(skb)->nr_frags;
+               ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
                MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
                                                        ETH_ZLEN);
        }
@@ -225,14 +242,21 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
        }
 
        if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
+               int bf_sz = 0;
+
+               if (bf && sq->uar_bf_map)
+                       bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3;
+
                cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-               mlx5e_tx_notify_hw(sq, wqe);
+               mlx5e_tx_notify_hw(sq, wqe, bf_sz);
        }
 
        /* fill sq edge with nops to avoid wqe wrap around */
        while ((sq->pc & wq->sz_m1) > sq->edge)
                mlx5e_send_nop(sq, false);
 
+       sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
+
        sq->stats.packets++;
        return NETDEV_TX_OK;
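
The doorbell path now tracks a blue-flame budget: it refills to MLX5E_SQ_BF_BUDGET only when the SQ is observed empty (cc == pc) after completions advance, each BF doorbell spends one unit, and any non-BF send zeroes it. A toy model of that accounting (constants and names illustrative):

#include <stdbool.h>
#include <stdio.h>

#define BF_BUDGET 16

struct toy_sq { unsigned cc, pc, prev_cc, bf_budget; };

static void tx_one(struct toy_sq *sq, bool can_bf)
{
	bool bf;

	if (sq->cc != sq->prev_cc) {    /* completions advanced */
		sq->prev_cc = sq->cc;
		sq->bf_budget = (sq->cc == sq->pc) ? BF_BUDGET : 0;
	}

	bf = sq->bf_budget && can_bf;   /* can_bf: no xmit_more, no frags */
	sq->pc++;                       /* post one WQE */
	sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
	printf("pc=%u bf=%d budget=%u\n", sq->pc, bf, sq->bf_budget);
}

int main(void)
{
	struct toy_sq sq = { 0, 0, 0, BF_BUDGET };

	tx_one(&sq, true);
	tx_one(&sq, false);             /* non-BF send drains the budget */
	return 0;
}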
 
index afad529838de748efc9f9253c6fde42abbe954a3..603a8b0908eea74a39bb88d9736d200d8acc3573 100644
@@ -455,7 +455,7 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
        struct mlx5_priv *priv  = &mdev->priv;
        struct msix_entry *msix = priv->msix_arr;
        int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
-       int numa_node           = dev_to_node(&mdev->pdev->dev);
+       int numa_node           = priv->numa_node;
        int err;
 
        if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
@@ -654,6 +654,22 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
 }
 #endif
 
+static int map_bf_area(struct mlx5_core_dev *dev)
+{
+       resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
+       resource_size_t bf_len = pci_resource_len(dev->pdev, 0);
+
+       dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);
+
+       return dev->priv.bf_mapping ? 0 : -ENOMEM;
+}
+
+static void unmap_bf_area(struct mlx5_core_dev *dev)
+{
+       if (dev->priv.bf_mapping)
+               io_mapping_free(dev->priv.bf_mapping);
+}
+
 static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 {
        struct mlx5_priv *priv = &dev->priv;
@@ -668,6 +684,10 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
        INIT_LIST_HEAD(&priv->pgdir_list);
        spin_lock_init(&priv->mkey_lock);
 
+       mutex_init(&priv->alloc_mutex);
+
+       priv->numa_node = dev_to_node(&dev->pdev->dev);
+
        priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
        if (!priv->dbg_root)
                return -ENOMEM;
@@ -804,10 +824,13 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
                goto err_stop_eqs;
        }
 
+       if (map_bf_area(dev))
+               dev_err(&pdev->dev, "Failed to map blue flame area\n");
+
        err = mlx5_irq_set_affinity_hints(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
-               goto err_free_comp_eqs;
+               goto err_unmap_bf_area;
        }
 
        MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
@@ -819,7 +842,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 
        return 0;
 
-err_free_comp_eqs:
+err_unmap_bf_area:
+       unmap_bf_area(dev);
+
        free_comp_eqs(dev);
 
 err_stop_eqs:
@@ -877,6 +902,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
        mlx5_cleanup_qp_table(dev);
        mlx5_cleanup_cq_table(dev);
        mlx5_irq_clear_affinity_hints(dev);
+       unmap_bf_area(dev);
        free_comp_eqs(dev);
        mlx5_stop_eqs(dev);
        mlx5_free_uuars(dev, &priv->uuari);
index fc88ecaecb4b4307f2d5c796cc91c548e856f845..566a70488db12ddef46f5623cb77c3e1e4c4ab2c 100644
@@ -73,7 +73,12 @@ static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
                                             int in_size, u32 *out,
                                             int out_size)
 {
-       mlx5_cmd_exec(dev, in, in_size, out, out_size);
+       int err;
+
+       err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
+       if (err)
+               return err;
+
        return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
 }
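
The fix above stops discarding the transport-level return code: previously a failed mlx5_cmd_exec() could leave the outbox unwritten, yet its status field was still parsed. The corrected control flow in isolation (hypothetical stand-ins for the two calls):

#include <stdio.h>

static int cmd_exec(int *out)           /* transport layer: may fail outright */
{
	*out = 0;
	return -1;                      /* e.g. -EIO: the outbox is not valid */
}

static int status_to_err(int out)       /* firmware status from the outbox */
{
	return out ? -2 : 0;
}

static int cmd_exec_check_status(int *out)
{
	int err = cmd_exec(out);

	if (err)                        /* previously this was ignored... */
		return err;             /* ...and a stale outbox was parsed */

	return status_to_err(*out);
}

int main(void)
{
	int out;

	printf("err=%d\n", cmd_exec_check_status(&out));
	return 0;
}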
 
index 8d98b03026d5db588eee7de23f04611f268b4543..c4f3f74908ec220254137ea7893cd373c6c3b507 100644
@@ -358,3 +358,32 @@ int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
        return  mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
                                           sizeof(out));
 }
+
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+                        u32 *rqtn)
+{
+       u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+       int err;
+
+       MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+
+       memset(out, 0, sizeof(out));
+       err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+       if (!err)
+               *rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+       return err;
+}
+
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
+       u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+       MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
+
+       mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
index f9ef244710d534b5e22bddd313d1497b36da12fa..10bd75e7d9b1dd3f44fab37bc274bbe219040677 100644
@@ -61,4 +61,8 @@ int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
 int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
 int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
 
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+                        u32 *rqtn);
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
+
 #endif /* __TRANSOBJ_H__ */
index 9ef85873ceea8203655c8e63cdc4601d15088157..eb05c845ece9247e7e54fee5880ad27be7be7253 100644
@@ -32,6 +32,7 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/io-mapping.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cmd.h>
 #include "mlx5_core.h"
@@ -246,6 +247,10 @@ int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
                goto err_free_uar;
        }
 
+       if (mdev->priv.bf_mapping)
+               uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping,
+                                               uar->index << PAGE_SHIFT);
+
        return 0;
 
 err_free_uar:
@@ -257,6 +262,7 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar);
 
 void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
 {
+       io_mapping_unmap(uar->bf_map);
        iounmap(uar->map);
        mlx5_cmd_free_uar(mdev, uar->index);
 }
index 8388411582cf80cfc1e0ab9b8ac48491c2a68f97..ce21ee5b23577ee63e8b5679a1fe2204c3ccd895 100644
@@ -73,13 +73,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
        wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
        wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
 
-       err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+       err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
        if (err) {
                mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
                return err;
        }
 
-       err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf);
+       err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
+                                 &wq_ctrl->buf, param->buf_numa_node);
        if (err) {
                mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
                goto err_db_free;
@@ -108,13 +109,14 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
        wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
        wq->sz_m1 = (1 << wq->log_sz) - 1;
 
-       err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+       err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
        if (err) {
                mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
                return err;
        }
 
-       err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf);
+       err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
+                                 &wq_ctrl->buf, param->buf_numa_node);
        if (err) {
                mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
                goto err_db_free;
@@ -144,7 +146,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
        wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
        wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
 
-       err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+       err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
        if (err) {
                mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
                return err;
index e0ddd69fb429ff5a2ded06572f43949f2ec67945..6c2a8f95093c6b0ea1212ac8ae9b1e29f421dad0 100644
@@ -37,7 +37,8 @@
 
 struct mlx5_wq_param {
        int             linear;
-       int             numa;
+       int             buf_numa_node;
+       int             db_numa_node;
 };
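
Splitting the single numa field lets the doorbell record and the work-queue buffer be placed on NUMA nodes independently; both are now steered to the node of the channel's CPU by the earlier hunks. A loose user-space analogue with libnuma (link with -lnuma; the structure here is illustrative, not the driver's):

#include <numa.h>
#include <stdio.h>

struct wq_param {
	int buf_numa_node;
	int db_numa_node;
};

int main(void)
{
	struct wq_param p = { 0, 0 };   /* e.g. both near the channel's CPU */
	void *buf, *db;

	if (numa_available() < 0)
		return 1;

	/* each object gets its own node, mirroring the two fields above */
	buf = numa_alloc_onnode(4096, p.buf_numa_node);
	db  = numa_alloc_onnode(64, p.db_numa_node);
	printf("buf=%p db=%p\n", buf, db);

	numa_free(db, 64);
	numa_free(buf, 4096);
	return 0;
}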
 
 struct mlx5_wq_ctrl {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
new file mode 100644 (file)
index 0000000..8d1080d
--- /dev/null
@@ -0,0 +1,32 @@
+#
+# Mellanox switch drivers configuration
+#
+
+config MLXSW_CORE
+       tristate "Mellanox Technologies Switch ASICs support"
+       ---help---
+         This driver supports the Mellanox Technologies Switch ASIC family.
+
+         To compile this driver as a module, choose M here: the
+         module will be called mlxsw_core.
+
+config MLXSW_PCI
+       tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
+       depends on PCI && MLXSW_CORE
+       default m
+       ---help---
+         This is the PCI bus implementation for Mellanox Technologies Switch ASICs.
+
+         To compile this driver as a module, choose M here: the
+         module will be called mlxsw_pci.
+
+config MLXSW_SWITCHX2
+       tristate "Mellanox Technologies SwitchX-2 support"
+       depends on MLXSW_CORE && NET_SWITCHDEV
+       default m
+       ---help---
+         This driver supports Mellanox Technologies SwitchX-2 Ethernet
+         Switch ASICs.
+
+         To compile this driver as a module, choose M here: the
+         module will be called mlxsw_switchx2.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
new file mode 100644 (file)
index 0000000..0a05f65
--- /dev/null
@@ -0,0 +1,6 @@
+obj-$(CONFIG_MLXSW_CORE)       += mlxsw_core.o
+mlxsw_core-objs                        := core.o
+obj-$(CONFIG_MLXSW_PCI)                += mlxsw_pci.o
+mlxsw_pci-objs                 := pci.o
+obj-$(CONFIG_MLXSW_SWITCHX2)   += mlxsw_switchx2.o
+mlxsw_switchx2-objs            := switchx2.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
new file mode 100644 (file)
index 0000000..770db17
--- /dev/null
@@ -0,0 +1,1090 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/cmd.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CMD_H
+#define _MLXSW_CMD_H
+
+#include "item.h"
+
+#define MLXSW_CMD_MBOX_SIZE    4096
+
+static inline char *mlxsw_cmd_mbox_alloc(void)
+{
+       return kzalloc(MLXSW_CMD_MBOX_SIZE, GFP_KERNEL);
+}
+
+static inline void mlxsw_cmd_mbox_free(char *mbox)
+{
+       kfree(mbox);
+}
+
+static inline void mlxsw_cmd_mbox_zero(char *mbox)
+{
+       memset(mbox, 0, MLXSW_CMD_MBOX_SIZE);
+}
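+
+/* Typical mailbox lifecycle around one of the command helpers declared
+ * below (a minimal sketch; handling of the command's own error code is
+ * elided):
+ *
+ *     char *mbox = mlxsw_cmd_mbox_alloc();
+ *
+ *     if (!mbox)
+ *             return -ENOMEM;
+ *     err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
+ *     ...
+ *     mlxsw_cmd_mbox_free(mbox);
+ */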
+
+struct mlxsw_core;
+
+int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
+                  u32 in_mod, bool out_mbox_direct,
+                  char *in_mbox, size_t in_mbox_size,
+                  char *out_mbox, size_t out_mbox_size);
+
+static inline int mlxsw_cmd_exec_in(struct mlxsw_core *mlxsw_core, u16 opcode,
+                                   u8 opcode_mod, u32 in_mod, char *in_mbox,
+                                   size_t in_mbox_size)
+{
+       return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
+                             in_mbox, in_mbox_size, NULL, 0);
+}
+
+static inline int mlxsw_cmd_exec_out(struct mlxsw_core *mlxsw_core, u16 opcode,
+                                    u8 opcode_mod, u32 in_mod,
+                                    bool out_mbox_direct,
+                                    char *out_mbox, size_t out_mbox_size)
+{
+       return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod,
+                             out_mbox_direct, NULL, 0,
+                             out_mbox, out_mbox_size);
+}
+
+static inline int mlxsw_cmd_exec_none(struct mlxsw_core *mlxsw_core, u16 opcode,
+                                     u8 opcode_mod, u32 in_mod)
+{
+       return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
+                             NULL, 0, NULL, 0);
+}
+
+enum mlxsw_cmd_opcode {
+       MLXSW_CMD_OPCODE_QUERY_FW               = 0x004,
+       MLXSW_CMD_OPCODE_QUERY_BOARDINFO        = 0x006,
+       MLXSW_CMD_OPCODE_QUERY_AQ_CAP           = 0x003,
+       MLXSW_CMD_OPCODE_MAP_FA                 = 0xFFF,
+       MLXSW_CMD_OPCODE_UNMAP_FA               = 0xFFE,
+       MLXSW_CMD_OPCODE_CONFIG_PROFILE         = 0x100,
+       MLXSW_CMD_OPCODE_ACCESS_REG             = 0x040,
+       MLXSW_CMD_OPCODE_SW2HW_DQ               = 0x201,
+       MLXSW_CMD_OPCODE_HW2SW_DQ               = 0x202,
+       MLXSW_CMD_OPCODE_2ERR_DQ                = 0x01E,
+       MLXSW_CMD_OPCODE_QUERY_DQ               = 0x022,
+       MLXSW_CMD_OPCODE_SW2HW_CQ               = 0x016,
+       MLXSW_CMD_OPCODE_HW2SW_CQ               = 0x017,
+       MLXSW_CMD_OPCODE_QUERY_CQ               = 0x018,
+       MLXSW_CMD_OPCODE_SW2HW_EQ               = 0x013,
+       MLXSW_CMD_OPCODE_HW2SW_EQ               = 0x014,
+       MLXSW_CMD_OPCODE_QUERY_EQ               = 0x015,
+};
+
+static inline const char *mlxsw_cmd_opcode_str(u16 opcode)
+{
+       switch (opcode) {
+       case MLXSW_CMD_OPCODE_QUERY_FW:
+               return "QUERY_FW";
+       case MLXSW_CMD_OPCODE_QUERY_BOARDINFO:
+               return "QUERY_BOARDINFO";
+       case MLXSW_CMD_OPCODE_QUERY_AQ_CAP:
+               return "QUERY_AQ_CAP";
+       case MLXSW_CMD_OPCODE_MAP_FA:
+               return "MAP_FA";
+       case MLXSW_CMD_OPCODE_UNMAP_FA:
+               return "UNMAP_FA";
+       case MLXSW_CMD_OPCODE_CONFIG_PROFILE:
+               return "CONFIG_PROFILE";
+       case MLXSW_CMD_OPCODE_ACCESS_REG:
+               return "ACCESS_REG";
+       case MLXSW_CMD_OPCODE_SW2HW_DQ:
+               return "SW2HW_DQ";
+       case MLXSW_CMD_OPCODE_HW2SW_DQ:
+               return "HW2SW_DQ";
+       case MLXSW_CMD_OPCODE_2ERR_DQ:
+               return "2ERR_DQ";
+       case MLXSW_CMD_OPCODE_QUERY_DQ:
+               return "QUERY_DQ";
+       case MLXSW_CMD_OPCODE_SW2HW_CQ:
+               return "SW2HW_CQ";
+       case MLXSW_CMD_OPCODE_HW2SW_CQ:
+               return "HW2SW_CQ";
+       case MLXSW_CMD_OPCODE_QUERY_CQ:
+               return "QUERY_CQ";
+       case MLXSW_CMD_OPCODE_SW2HW_EQ:
+               return "SW2HW_EQ";
+       case MLXSW_CMD_OPCODE_HW2SW_EQ:
+               return "HW2SW_EQ";
+       case MLXSW_CMD_OPCODE_QUERY_EQ:
+               return "QUERY_EQ";
+       default:
+               return "*UNKNOWN*";
+       }
+}
+
+enum mlxsw_cmd_status {
+       /* Command execution succeeded. */
+       MLXSW_CMD_STATUS_OK             = 0x00,
+       /* Internal error (e.g. bus error) occurred while processing command. */
+       MLXSW_CMD_STATUS_INTERNAL_ERR   = 0x01,
+       /* Operation/command not supported or opcode modifier not supported. */
+       MLXSW_CMD_STATUS_BAD_OP         = 0x02,
+       /* Parameter not supported, parameter out of range. */
+       MLXSW_CMD_STATUS_BAD_PARAM      = 0x03,
+       /* System was not enabled or bad system state. */
+       MLXSW_CMD_STATUS_BAD_SYS_STATE  = 0x04,
+       /* Attempt to access reserved or unallocated resource, or resource in
+        * inappropriate ownership.
+        */
+       MLXSW_CMD_STATUS_BAD_RESOURCE   = 0x05,
+       /* Requested resource is currently executing a command. */
+       MLXSW_CMD_STATUS_RESOURCE_BUSY  = 0x06,
+       /* Required capability exceeds device limits. */
+       MLXSW_CMD_STATUS_EXCEED_LIM     = 0x08,
+       /* Resource is not in the appropriate state or ownership. */
+       MLXSW_CMD_STATUS_BAD_RES_STATE  = 0x09,
+       /* Index out of range (might be beyond table size or attempt to
+        * access a reserved resource).
+        */
+       MLXSW_CMD_STATUS_BAD_INDEX      = 0x0A,
+       /* NVMEM checksum/CRC failed. */
+       MLXSW_CMD_STATUS_BAD_NVMEM      = 0x0B,
+       /* Bad management packet (silently discarded). */
+       MLXSW_CMD_STATUS_BAD_PKT        = 0x30,
+};
+
+static inline const char *mlxsw_cmd_status_str(u8 status)
+{
+       switch (status) {
+       case MLXSW_CMD_STATUS_OK:
+               return "OK";
+       case MLXSW_CMD_STATUS_INTERNAL_ERR:
+               return "INTERNAL_ERR";
+       case MLXSW_CMD_STATUS_BAD_OP:
+               return "BAD_OP";
+       case MLXSW_CMD_STATUS_BAD_PARAM:
+               return "BAD_PARAM";
+       case MLXSW_CMD_STATUS_BAD_SYS_STATE:
+               return "BAD_SYS_STATE";
+       case MLXSW_CMD_STATUS_BAD_RESOURCE:
+               return "BAD_RESOURCE";
+       case MLXSW_CMD_STATUS_RESOURCE_BUSY:
+               return "RESOURCE_BUSY";
+       case MLXSW_CMD_STATUS_EXCEED_LIM:
+               return "EXCEED_LIM";
+       case MLXSW_CMD_STATUS_BAD_RES_STATE:
+               return "BAD_RES_STATE";
+       case MLXSW_CMD_STATUS_BAD_INDEX:
+               return "BAD_INDEX";
+       case MLXSW_CMD_STATUS_BAD_NVMEM:
+               return "BAD_NVMEM";
+       case MLXSW_CMD_STATUS_BAD_PKT:
+               return "BAD_PKT";
+       default:
+               return "*UNKNOWN*";
+       }
+}
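+
+/* How a caller might fold a non-OK status into an errno (sketch only; the
+ * authoritative mapping is done by the command-interface implementation):
+ *
+ *     if (status != MLXSW_CMD_STATUS_OK)
+ *             return status == MLXSW_CMD_STATUS_RESOURCE_BUSY ?
+ *                    -EBUSY : -EIO;
+ */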
+
+/* QUERY_FW - Query Firmware
+ * -------------------------
+ * OpMod == 0, INMmod == 0
+ * -----------------------
+ * The QUERY_FW command retrieves information related to firmware, command
+ * interface version and the amount of resources that should be allocated to
+ * the firmware.
+ */
+
+static inline int mlxsw_cmd_query_fw(struct mlxsw_core *mlxsw_core,
+                                    char *out_mbox)
+{
+       return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_FW,
+                                 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_fw_fw_pages
+ * Amount of physical memory to be allocated for firmware usage, in 4KB pages.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_pages, 0x00, 16, 16);
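+
+/* Each MLXSW_ITEM32/MLXSW_ITEM64 invocation expands, via item.h, into typed
+ * get/set accessors named after the item, so reading the field above from a
+ * filled mailbox is simply (sketch):
+ *
+ *     u32 fw_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
+ */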
+
+/* cmd_mbox_query_fw_fw_rev_major
+ * Firmware Revision - Major
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_major, 0x00, 0, 16);
+
+/* cmd_mbox_query_fw_fw_rev_subminor
+ * Firmware Sub-minor version (Patch level)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_subminor, 0x04, 16, 16);
+
+/* cmd_mbox_query_fw_fw_rev_minor
+ * Firmware Revision - Minor
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_minor, 0x04, 0, 16);
+
+/* cmd_mbox_query_fw_core_clk
+ * Internal Clock Frequency (in MHz)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, core_clk, 0x08, 16, 16);
+
+/* cmd_mbox_query_fw_cmd_interface_rev
+ * Command Interface Interpreter Revision ID. This number is bumped up
+ * every time a non-backward-compatible change is done for the command
+ * interface. The current cmd_interface_rev is 1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, cmd_interface_rev, 0x08, 0, 16);
+
+/* cmd_mbox_query_fw_dt
+ * If set, Debug Trace is supported
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, dt, 0x0C, 31, 1);
+
+/* cmd_mbox_query_fw_api_version
+ * Indicates the version of the API, to enable software querying
+ * for compatibility. The current api_version is 1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, api_version, 0x0C, 0, 16);
+
+/* cmd_mbox_query_fw_fw_hour
+ * Firmware timestamp - hour
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_hour, 0x10, 24, 8);
+
+/* cmd_mbox_query_fw_fw_minutes
+ * Firmware timestamp - minutes
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_minutes, 0x10, 16, 8);
+
+/* cmd_mbox_query_fw_fw_seconds
+ * Firmware timestamp - seconds
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_seconds, 0x10, 8, 8);
+
+/* cmd_mbox_query_fw_fw_year
+ * Firmware timestamp - year
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_year, 0x14, 16, 16);
+
+/* cmd_mbox_query_fw_fw_month
+ * Firmware timestamp - month
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_month, 0x14, 8, 8);
+
+/* cmd_mbox_query_fw_fw_day
+ * Firmware timestamp - day
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_day, 0x14, 0, 8);
+
+/* cmd_mbox_query_fw_clr_int_base_offset
+ * Clear Interrupt register's offset from clr_int_bar register
+ * in PCI address space.
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, clr_int_base_offset, 0x20, 0, 64);
+
+/* cmd_mbox_query_fw_clr_int_bar
+ * PCI base address register (BAR) where clr_int register is located.
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, clr_int_bar, 0x28, 30, 2);
+
+/* cmd_mbox_query_fw_error_buf_offset
+ * Offset of the read-only buffer for internal error reports from the
+ * error_buf_bar register in PCI address space.
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, error_buf_offset, 0x30, 0, 64);
+
+/* cmd_mbox_query_fw_error_buf_size
+ * Internal error buffer size in DWORDs
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, error_buf_size, 0x38, 0, 32);
+
+/* cmd_mbox_query_fw_error_int_bar
+ * PCI base address register (BAR) where error buffer
+ * register is located.
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, error_int_bar, 0x3C, 30, 2);
+
+/* cmd_mbox_query_fw_doorbell_page_offset
+ * Offset of the doorbell page
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, doorbell_page_offset, 0x40, 0, 64);
+
+/* cmd_mbox_query_fw_doorbell_page_bar
+ * PCI base address register (BAR) of the doorbell page
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, doorbell_page_bar, 0x48, 30, 2);
+
+/* QUERY_BOARDINFO - Query Board Information
+ * -----------------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The QUERY_BOARDINFO command retrieves adapter specific parameters.
+ */
+
+static inline int mlxsw_cmd_boardinfo(struct mlxsw_core *mlxsw_core,
+                                     char *out_mbox)
+{
+       return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_BOARDINFO,
+                                 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_boardinfo_intapin
+ * When PCIe interrupt messages are being used, this value is used for clearing
+ * an interrupt. When using MSI-X, this register is not used.
+ */
+MLXSW_ITEM32(cmd_mbox, boardinfo, intapin, 0x10, 24, 8);
+
+/* cmd_mbox_boardinfo_vsd_vendor_id
+ * PCISIG Vendor ID (www.pcisig.com/membership/vid_search) of the vendor
+ * specifying/formatting the VSD. The vsd_vendor_id identifies the management
+ * domain of the VSD/PSID data. Different vendors may choose different VSD/PSID
+ * format and encoding as long as they use their assigned vsd_vendor_id.
+ */
+MLXSW_ITEM32(cmd_mbox, boardinfo, vsd_vendor_id, 0x1C, 0, 16);
+
+/* cmd_mbox_boardinfo_vsd
+ * Vendor Specific Data. The VSD string that is burnt to the Flash
+ * with the firmware.
+ */
+#define MLXSW_CMD_BOARDINFO_VSD_LEN 208
+MLXSW_ITEM_BUF(cmd_mbox, boardinfo, vsd, 0x20, MLXSW_CMD_BOARDINFO_VSD_LEN);
+
+/* cmd_mbox_boardinfo_psid
+ * The PSID field is a 16-byte ASCII character string which acts as
+ * the board ID. The PSID format is used in conjunction with
+ * Mellanox vsd_vendor_id (15B3h).
+ */
+#define MLXSW_CMD_BOARDINFO_PSID_LEN 16
+MLXSW_ITEM_BUF(cmd_mbox, boardinfo, psid, 0xF0, MLXSW_CMD_BOARDINFO_PSID_LEN);
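+
+/* MLXSW_ITEM_BUF items expand into _memcpy_from/_memcpy_to helpers instead
+ * of get/set; copying the PSID out of a QUERY_BOARDINFO response (sketch):
+ *
+ *     char psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
+ *
+ *     mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(out_mbox, psid);
+ */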
+
+/* QUERY_AQ_CAP - Query Asynchronous Queues Capabilities
+ * -----------------------------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The QUERY_AQ_CAP command returns the device asynchronous queues
+ * capabilities supported.
+ */
+
+static inline int mlxsw_cmd_query_aq_cap(struct mlxsw_core *mlxsw_core,
+                                        char *out_mbox)
+{
+       return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_AQ_CAP,
+                                 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_aq_cap_log_max_sdq_sz
+ * Log (base 2) of max WQEs allowed on SDQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_sdq_sz, 0x00, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_sdqs
+ * Maximum number of SDQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_sdqs, 0x00, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_rdq_sz
+ * Log (base 2) of max WQEs allowed on RDQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_rdq_sz, 0x04, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_rdqs
+ * Maximum number of RDQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_cq_sz
+ * Log (base 2) of max CQEs allowed on CQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_cqs
+ * Maximum number of CQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_cqs, 0x08, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_eq_sz
+ * Log (base 2) of max EQEs allowed on EQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_eq_sz, 0x0C, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_eqs
+ * Maximum number of EQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_eqs, 0x0C, 0, 8);
+
+/* cmd_mbox_query_aq_cap_max_sg_sq
+ * The maximum number of S/G list elements in an SDQ. An SDQ must not
+ * contain more S/G entries than indicated here.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_sq, 0x10, 8, 8);
+
+/* cmd_mbox_query_aq_cap_max_sg_rq
+ * The maximum number of S/G list elements in an RDQ. An RDQ must not
+ * contain more S/G entries than indicated here.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_rq, 0x10, 0, 8);
+
+/* MAP_FA - Map Firmware Area
+ * --------------------------
+ * OpMod == 0 (N/A), INMmod == Number of VPM entries
+ * -------------------------------------------------
+ * The MAP_FA command passes physical pages to the switch. These pages
+ * are used to store the device firmware. MAP_FA can be executed multiple
+ * times until all the firmware area is mapped (the size that should be
+ * mapped is retrieved through the QUERY_FW command). All required pages
+ * must be mapped to finish the initialization phase. Physical memory
+ * passed in this command must be pinned.
+ */
+
+static inline int mlxsw_cmd_map_fa(struct mlxsw_core *mlxsw_core,
+                                  char *in_mbox, u32 vpm_entries_count)
+{
+       return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_MAP_FA,
+                                0, vpm_entries_count,
+                                in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_map_fa_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, map_fa, pa, 0x00, 12, 52, 0x08, 0x00, true);
+
+/* cmd_mbox_map_fa_log2size
+ * Log (base 2) of the size in 4KB pages of the physical and contiguous memory
+ * that starts at PA_L/H.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, map_fa, log2size, 0x00, 0, 5, 0x08, 0x04, false);
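+
+/* Filling the first VPM entry before issuing MAP_FA might look like this
+ * (sketch; dma_addr stands for a hypothetical pinned, DMA-mapped page):
+ *
+ *     mlxsw_cmd_mbox_map_fa_pa_set(mbox, 0, dma_addr);
+ *     mlxsw_cmd_mbox_map_fa_log2size_set(mbox, 0, 0);  (a single 4KB page)
+ */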
+
+/* UNMAP_FA - Unmap Firmware Area
+ * ------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The UNMAP_FA command unloads the firmware and unmaps all of the
+ * firmware area. After this command is completed, the device will not access
+ * the pages that were mapped to the firmware area. After executing the
+ * UNMAP_FA command, a software reset must be done prior to execution of the
+ * MAP_FA command.
+ */
+
+static inline int mlxsw_cmd_unmap_fa(struct mlxsw_core *mlxsw_core)
+{
+       return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_UNMAP_FA, 0, 0);
+}
+
+/* CONFIG_PROFILE (Set) - Configure Switch Profile
+ * ------------------------------
+ * OpMod == 1 (Set), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The CONFIG_PROFILE command sets the switch profile. The command can be
+ * executed on the device only once at startup in order to allocate and
+ * configure all switch resources and prepare it for operational mode.
+ * It is not possible to change the device profile after the chip is
+ * in operational mode.
+ * Failure of the CONFIG_PROFILE command leaves the hardware in an
+ * indeterminate state; a software reset of the device is therefore required
+ * following an unsuccessful completion of the command. A software reset of
+ * the device is also required in order to change an existing profile.
+ */
+
+static inline int mlxsw_cmd_config_profile_set(struct mlxsw_core *mlxsw_core,
+                                              char *in_mbox)
+{
+       return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_CONFIG_PROFILE,
+                                1, 0, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_config_profile_set_max_vepa_channels
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vepa_channels, 0x0C, 0, 1);
+
+/* cmd_mbox_config_profile_set_max_lag
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_lag, 0x0C, 1, 1);
+
+/* cmd_mbox_config_profile_set_max_port_per_lag
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_port_per_lag, 0x0C, 2, 1);
+
+/* cmd_mbox_config_profile_set_max_mid
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_mid, 0x0C, 3, 1);
+
+/* cmd_mbox_config_profile_set_max_pgt
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pgt, 0x0C, 4, 1);
+
+/* cmd_mbox_config_profile_set_max_system_port
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_system_port, 0x0C, 5, 1);
+
+/* cmd_mbox_config_profile_set_max_vlan_groups
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vlan_groups, 0x0C, 6, 1);
+
+/* cmd_mbox_config_profile_set_max_regions
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_regions, 0x0C, 7, 1);
+
+/* cmd_mbox_config_profile_set_flood_mode
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_mode, 0x0C, 8, 1);
+
+/* cmd_mbox_config_profile_set_flood_tables
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_tables, 0x0C, 9, 1);
+
+/* cmd_mbox_config_profile_set_max_ib_mc
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_ib_mc, 0x0C, 12, 1);
+
+/* cmd_mbox_config_profile_set_max_pkey
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pkey, 0x0C, 13, 1);
+
+/* cmd_mbox_config_profile_set_adaptive_routing_group_cap
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+            set_adaptive_routing_group_cap, 0x0C, 14, 1);
+
+/* cmd_mbox_config_profile_set_ar_sec
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
+
+/* cmd_mbox_config_profile_max_vepa_channels
+ * Maximum number of VEPA channels per port (0 through 16)
+ * 0 - multi-channel VEPA is disabled
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vepa_channels, 0x10, 0, 8);
+
+/* cmd_mbox_config_profile_max_lag
+ * Maximum number of LAG IDs requested.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_lag, 0x14, 0, 16);
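+
+/* Each max_* value in this mailbox is consumed only when its corresponding
+ * set_* capability bit above is turned on, so requesting a LAG limit pairs
+ * the two (sketch; 64 is an arbitrary example value):
+ *
+ *     mlxsw_cmd_mbox_config_profile_set_max_lag_set(mbox, 1);
+ *     mlxsw_cmd_mbox_config_profile_max_lag_set(mbox, 64);
+ */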
+
+/* cmd_mbox_config_profile_max_port_per_lag
+ * Maximum number of ports per LAG requested.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_port_per_lag, 0x18, 0, 16);
+
+/* cmd_mbox_config_profile_max_mid
+ * Maximum Multicast IDs.
+ * Multicast IDs are allocated from 0 to max_mid-1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_mid, 0x1C, 0, 16);
+
+/* cmd_mbox_config_profile_max_pgt
+ * Maximum records in the Port Group Table per Switch Partition.
+ * Port Group Table indexes are from 0 to max_pgt-1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_pgt, 0x20, 0, 16);
+
+/* cmd_mbox_config_profile_max_system_port
+ * The maximum number of system ports that can be allocated.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_system_port, 0x24, 0, 16);
+
+/* cmd_mbox_config_profile_max_vlan_groups
+ * Maximum number of VLAN Groups for VLAN binding.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vlan_groups, 0x28, 0, 12);
+
+/* cmd_mbox_config_profile_max_regions
+ * Maximum number of TCAM Regions.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_regions, 0x2C, 0, 16);
+
+/* cmd_mbox_config_profile_max_flood_tables
+ * Maximum number of Flooding Tables. Flooding Tables are associated to
+ * the different packet types for the different switch partitions.
+ * Note that the table size depends on the fid_based mode.
+ * In SwitchX silicon, tables are split equally between the switch
+ * partitions. e.g. for 2 swids and 8 tables, the first 4 are associated
+ * with swid-1 and the last 4 are associated with swid-2.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
+
+/* cmd_mbox_config_profile_max_vid_flood_tables
+ * Maximum number of per-vid flooding tables. Flooding tables are associated
+ * to the different packet types for the different switch partitions.
+ * Table size is 4K entries covering all VID space.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4);
+
+/* cmd_mbox_config_profile_flood_mode
+ * FID Based Flood Mode
+ * 00 Do not use FID to offset the index into the Port Group Table/Multicast ID
+ * 01 Use FID to offset the index to the Port Group Table (pgi)
+ * 10 Use FID to offset the index to the Port Group Table (pgi) and
+ * the Multicast ID
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2);
+
+/* cmd_mbox_config_profile_max_ib_mc
+ * Maximum number of multicast FDB records for InfiniBand
+ * FDB (in 512 chunks) per InfiniBand switch partition.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_ib_mc, 0x40, 0, 15);
+
+/* cmd_mbox_config_profile_max_pkey
+ * Maximum per port PKEY table size (for PKEY enforcement)
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_pkey, 0x44, 0, 15);
+
+/* cmd_mbox_config_profile_ar_sec
+ * Primary/secondary capability
+ * Describes the number of adaptive routing sub-groups
+ * 0 - disable primary/secondary (single group)
+ * 1 - enable primary/secondary (2 sub-groups)
+ * 2 - 3 sub-groups: Not supported in SwitchX, SwitchX-2
+ * 3 - 4 sub-groups: Not supported in SwitchX, SwitchX-2
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, ar_sec, 0x4C, 24, 2);
+
+/* cmd_mbox_config_profile_adaptive_routing_group_cap
+ * Adaptive Routing Group Capability. Indicates the number of AR groups
+ * supported. Note that when Primary/secondary is enabled, each
+ * primary/secondary couple consumes 2 adaptive routing entries.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);
+
+/* cmd_mbox_config_profile_arn
+ * Adaptive Routing Notification Enable
+ * Not supported in SwitchX, SwitchX-2
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
+
+/* cmd_mbox_config_profile_swid_config_mask
+ * Modify Switch Partition Configuration mask. When set, the configuration
+ * values for the Switch Partition are taken from the mailbox.
+ * When clear, the current configuration values are used.
+ * Bit 0 - set type
+ * Bit 1 - properties
+ * Other - reserved
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_mask,
+                    0x60, 24, 8, 0x08, 0x00, false);
+
+/* cmd_mbox_config_profile_swid_config_type
+ * Switch Partition type.
+ * 0000 - disabled (Switch Partition does not exist)
+ * 0001 - InfiniBand
+ * 0010 - Ethernet
+ * 1000 - router port (SwitchX-2 only)
+ * Other - reserved
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type,
+                    0x60, 20, 4, 0x08, 0x00, false);
+
+/* cmd_mbox_config_profile_swid_config_properties
+ * Switch Partition properties.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
+                    0x60, 0, 8, 0x08, 0x00, false);
+
+/* ACCESS_REG - Access EMAD Supported Register
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -------------------------------------
+ * The ACCESS_REG command supports accessing device registers. This access
+ * is mainly used for bootstrapping.
+ */
+
+static inline int mlxsw_cmd_access_reg(struct mlxsw_core *mlxsw_core,
+                                      char *in_mbox, char *out_mbox)
+{
+       return mlxsw_cmd_exec(mlxsw_core, MLXSW_CMD_OPCODE_ACCESS_REG,
+                             0, 0, false, in_mbox, MLXSW_CMD_MBOX_SIZE,
+                             out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* SW2HW_DQ - Software to Hardware DQ
+ * ----------------------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The SW2HW_DQ command transitions a descriptor queue from software to
+ * hardware ownership. The command enables posting WQEs and ringing DoorBells
+ * on the descriptor queue.
+ */
+
+static inline int __mlxsw_cmd_sw2hw_dq(struct mlxsw_core *mlxsw_core,
+                                      char *in_mbox, u32 dq_number,
+                                      u8 opcode_mod)
+{
+       return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_DQ,
+                                opcode_mod, dq_number,
+                                in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+enum {
+       MLXSW_CMD_OPCODE_MOD_SDQ = 0,
+       MLXSW_CMD_OPCODE_MOD_RDQ = 1,
+};
+
+static inline int mlxsw_cmd_sw2hw_sdq(struct mlxsw_core *mlxsw_core,
+                                     char *in_mbox, u32 dq_number)
+{
+       return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
+                                   MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_sw2hw_rdq(struct mlxsw_core *mlxsw_core,
+                                     char *in_mbox, u32 dq_number)
+{
+       return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
+                                   MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* cmd_mbox_sw2hw_dq_cq
+ * Number of the CQ that this Descriptor Queue reports completions to.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, cq, 0x00, 24, 8);
+
+/* cmd_mbox_sw2hw_dq_sdq_tclass
+ * SDQ: CPU Egress TClass
+ * RDQ: Reserved
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, sdq_tclass, 0x00, 16, 6);
+
+/* cmd_mbox_sw2hw_dq_log2_dq_sz
+ * Log (base 2) of the Descriptor Queue size in 4KB pages.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, log2_dq_sz, 0x00, 0, 6);
+
+/* cmd_mbox_sw2hw_dq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_dq, pa, 0x10, 12, 52, 0x08, 0x00, true);
+
+/* HW2SW_DQ - Hardware to Software DQ
+ * ----------------------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The HW2SW_DQ command transitions a descriptor queue from hardware to
+ * software ownership. Incoming packets on the DQ are silently discarded;
+ * SW should not post descriptors on non-operational DQs.
+ */
+
+static inline int __mlxsw_cmd_hw2sw_dq(struct mlxsw_core *mlxsw_core,
+                                      u32 dq_number, u8 opcode_mod)
+{
+       return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_DQ,
+                                  opcode_mod, dq_number);
+}
+
+static inline int mlxsw_cmd_hw2sw_sdq(struct mlxsw_core *mlxsw_core,
+                                     u32 dq_number)
+{
+       return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
+                                   MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_hw2sw_rdq(struct mlxsw_core *mlxsw_core,
+                                     u32 dq_number)
+{
+       return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
+                                   MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* 2ERR_DQ - To Error DQ
+ * ---------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The 2ERR_DQ command transitions the DQ into the error state from the state
+ * in which it has been. While the command is executed, some in-process
+ * descriptors may complete. Once the DQ transitions into the error state,
+ * if there are posted descriptors on the RDQ/SDQ, the hardware writes
+ * a completion with error (flushed) for all descriptors posted in the RDQ/SDQ.
+ * When the command is completed successfully, the DQ is already in
+ * the error state.
+ */
+
+static inline int __mlxsw_cmd_2err_dq(struct mlxsw_core *mlxsw_core,
+                                     u32 dq_number, u8 opcode_mod)
+{
+       return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_2ERR_DQ,
+                                  opcode_mod, dq_number);
+}
+
+static inline int mlxsw_cmd_2err_sdq(struct mlxsw_core *mlxsw_core,
+                                    u32 dq_number)
+{
+       return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
+                                  MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_2err_rdq(struct mlxsw_core *mlxsw_core,
+                                    u32 dq_number)
+{
+       return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
+                                  MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* QUERY_DQ - Query DQ
+ * ---------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The QUERY_DQ command retrieves a snapshot of DQ parameters from the hardware.
+ *
+ * Note: Output mailbox has the same format as SW2HW_DQ.
+ */
+
+static inline int __mlxsw_cmd_query_dq(struct mlxsw_core *mlxsw_core,
+                                      char *out_mbox, u32 dq_number,
+                                      u8 opcode_mod)
+{
+       return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_DQ,
+                                 opcode_mod, dq_number, false,
+                                 out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+static inline int mlxsw_cmd_query_sdq(struct mlxsw_core *mlxsw_core,
+                                     char *out_mbox, u32 dq_number)
+{
+       return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
+                                   MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_query_rdq(struct mlxsw_core *mlxsw_core,
+                                     char *out_mbox, u32 dq_number)
+{
+       return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
+                                   MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* SW2HW_CQ - Software to Hardware CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The SW2HW_CQ command transfers ownership of a CQ context entry from software
+ * to hardware. The command takes the CQ context entry from the input mailbox
+ * and stores it in the CQC in the ownership of the hardware. The command fails
+ * if the requested CQC entry is already in the ownership of the hardware.
+ */
+
+static inline int mlxsw_cmd_sw2hw_cq(struct mlxsw_core *mlxsw_core,
+                                    char *in_mbox, u32 cq_number)
+{
+       return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_CQ,
+                                0, cq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_sw2hw_cq_cv
+ * CQE Version.
+ * 0 - CQE Version 0, 1 - CQE Version 1
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4);
+
+/* cmd_mbox_sw2hw_cq_c_eqn
+ * Event Queue this CQ reports completion events to.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, c_eqn, 0x00, 24, 1);
+
+/* cmd_mbox_sw2hw_cq_oi
+ * When set, overrun ignore is enabled: neither CQ consumer counter updates
+ * (poll for completion) nor request completion notification (arm CQ)
+ * doorbells should be rung on that CQ.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, oi, 0x00, 12, 1);
+
+/* cmd_mbox_sw2hw_cq_st
+ * Event delivery state machine
+ * 0x0 - FIRED
+ * 0x1 - ARMED (Request for Notification)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, st, 0x00, 8, 1);
+
+/* cmd_mbox_sw2hw_cq_log_cq_size
+ * Log (base 2) of the CQ size (in entries).
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, log_cq_size, 0x00, 0, 4);
+
+/* cmd_mbox_sw2hw_cq_producer_counter
+ * Producer Counter. The counter is incremented for each CQE that is
+ * written by the HW to the CQ.
+ * Maintained by HW (valid for the QUERY_CQ command only)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, producer_counter, 0x04, 0, 16);
+
+/* cmd_mbox_sw2hw_cq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_cq, pa, 0x10, 11, 53, 0x08, 0x00, true);
+
+/* HW2SW_CQ - Hardware to Software CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The HW2SW_CQ command transfers ownership of a CQ context entry from hardware
+ * to software. The CQC entry is invalidated as a result of this command.
+ */
+
+static inline int mlxsw_cmd_hw2sw_cq(struct mlxsw_core *mlxsw_core,
+                                    u32 cq_number)
+{
+       return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_CQ,
+                                  0, cq_number);
+}
+
+/* QUERY_CQ - Query CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The QUERY_CQ command retrieves a snapshot of the current CQ context entry.
+ * The command stores the snapshot in the output mailbox in the software format.
+ * Note that the CQ context state and values are not affected by the QUERY_CQ
+ * command. The QUERY_CQ command is for debug purposes only.
+ *
+ * Note: Output mailbox has the same format as SW2HW_CQ.
+ */
+
+static inline int mlxsw_cmd_query_cq(struct mlxsw_core *mlxsw_core,
+                                    char *out_mbox, u32 cq_number)
+{
+       return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_CQ,
+                                 0, cq_number, false,
+                                 out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* SW2HW_EQ - Software to Hardware EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ * The SW2HW_EQ command transfers ownership of an EQ context entry from software
+ * to hardware. The command takes the EQ context entry from the input mailbox
+ * and stores it in the EQC in the ownership of the hardware. The command fails
+ * if the requested EQC entry is already in the ownership of the hardware.
+ */
+
+static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core,
+                                    char *in_mbox, u32 eq_number)
+{
+       return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_EQ,
+                                0, eq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_sw2hw_eq_int_msix
+ * When set, MSI-X cycles will be generated by this EQ.
+ * When cleared, an interrupt will be generated by this EQ.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1);
+
+/* cmd_mbox_sw2hw_eq_oi
+ * When set, overrun ignore is enabled.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1);
+
+/* cmd_mbox_sw2hw_eq_st
+ * Event delivery state machine
+ * 0x0 - FIRED
+ * 0x1 - ARMED (Request for Notification)
+ * 0x3 (binary 11) - Always ARMED
+ * other - reserved
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, st, 0x00, 8, 2);
+
+/* cmd_mbox_sw2hw_eq_log_eq_size
+ * Log (base 2) of the EQ size (in entries).
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, log_eq_size, 0x00, 0, 4);
+
+/* cmd_mbox_sw2hw_eq_producer_counter
+ * Producer Counter. The counter is incremented for each EQE that is written
+ * by the HW to the EQ.
+ * Maintained by HW (valid for the QUERY_EQ command only)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, producer_counter, 0x04, 0, 16);
+
+/* cmd_mbox_sw2hw_eq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_eq, pa, 0x10, 11, 53, 0x08, 0x00, true);
+
+/* HW2SW_EQ - Hardware to Software EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ * The HW2SW_EQ command transfers ownership of an EQ context entry from
+ * hardware to software. The EQC entry is invalidated as a result of this
+ * command.
+ */
+
+static inline int mlxsw_cmd_hw2sw_eq(struct mlxsw_core *mlxsw_core,
+                                    u32 eq_number)
+{
+       return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_EQ,
+                                  0, eq_number);
+}
+
+/* QUERY_EQ - Query EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ *
+ * Note: Output mailbox has the same format as SW2HW_EQ.
+ */
+
+static inline int mlxsw_cmd_query_eq(struct mlxsw_core *mlxsw_core,
+                                    char *out_mbox, u32 eq_number)
+{
+       return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_EQ,
+                                 0, eq_number, false,
+                                 out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
new file mode 100644 (file)
index 0000000..ad66ae4
--- /dev/null
@@ -0,0 +1,1286 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/if_link.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/netdevice.h>
+#include <linux/wait.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/gfp.h>
+#include <linux/random.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+
+#include "core.h"
+#include "item.h"
+#include "cmd.h"
+#include "port.h"
+#include "trap.h"
+#include "emad.h"
+#include "reg.h"
+
+static LIST_HEAD(mlxsw_core_driver_list);
+static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
+
+static const char mlxsw_core_driver_name[] = "mlxsw_core";
+
+static struct dentry *mlxsw_core_dbg_root;
+
+struct mlxsw_core_pcpu_stats {
+       u64                     trap_rx_packets[MLXSW_TRAP_ID_MAX];
+       u64                     trap_rx_bytes[MLXSW_TRAP_ID_MAX];
+       u64                     port_rx_packets[MLXSW_PORT_MAX_PORTS];
+       u64                     port_rx_bytes[MLXSW_PORT_MAX_PORTS];
+       struct u64_stats_sync   syncp;
+       u32                     trap_rx_dropped[MLXSW_TRAP_ID_MAX];
+       u32                     port_rx_dropped[MLXSW_PORT_MAX_PORTS];
+       u32                     trap_rx_invalid;
+       u32                     port_rx_invalid;
+};
+
+struct mlxsw_core {
+       struct mlxsw_driver *driver;
+       const struct mlxsw_bus *bus;
+       void *bus_priv;
+       const struct mlxsw_bus_info *bus_info;
+       struct list_head rx_listener_list;
+       struct list_head event_listener_list;
+       struct {
+               struct sk_buff *resp_skb;
+               u64 tid;
+               wait_queue_head_t wait;
+               bool trans_active;
+               struct mutex lock; /* One EMAD transaction at a time. */
+               bool use_emad;
+       } emad;
+       struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
+       struct dentry *dbg_dir;
+       struct {
+               struct debugfs_blob_wrapper vsd_blob;
+               struct debugfs_blob_wrapper psid_blob;
+       } dbg;
+       unsigned long driver_priv[0];
+       /* driver_priv must always be the last item */
+};
+
+struct mlxsw_rx_listener_item {
+       struct list_head list;
+       struct mlxsw_rx_listener rxl;
+       void *priv;
+};
+
+struct mlxsw_event_listener_item {
+       struct list_head list;
+       struct mlxsw_event_listener el;
+       void *priv;
+};
+
+/******************
+ * EMAD processing
+ ******************/
+
+/* emad_eth_hdr_dmac
+ * Destination MAC in EMAD's Ethernet header.
+ * Must be set to 01:02:c9:00:00:01
+ */
+MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);
+
+/* emad_eth_hdr_smac
+ * Source MAC in EMAD's Ethernet header.
+ * Must be set to 00:02:c9:01:02:03
+ */
+MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);
+
+/* emad_eth_hdr_ethertype
+ * Ethertype in EMAD's Ethernet header.
+ * Must be set to 0x8932
+ */
+MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);
+
+/* emad_eth_hdr_mlx_proto
+ * Mellanox protocol.
+ * Must be set to 0x0.
+ */
+MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);
+
+/* emad_eth_hdr_ver
+ * Mellanox protocol version.
+ * Must be set to 0x0.
+ */
+MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);
+
+/* emad_op_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x1 (operation TLV).
+ */
+MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);
+
+/* emad_op_tlv_len
+ * Length of the operation TLV in u32.
+ * Must be set to 0x4.
+ */
+MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);
+
+/* emad_op_tlv_dr
+ * Direct route bit. Setting to 1 indicates the EMAD is a direct route
+ * EMAD. DR TLV must follow.
+ *
+ * Note: Currently not supported and must not be set.
+ */
+MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);
+
+/* emad_op_tlv_status
+ * Returned status in case of EMAD response. Must be set to 0 in case
+ * of EMAD request.
+ * 0x0 - success
+ * 0x1 - device is busy. Requester should retry
+ * 0x2 - Mellanox protocol version not supported
+ * 0x3 - unknown TLV
+ * 0x4 - register not supported
+ * 0x5 - operation class not supported
+ * 0x6 - EMAD method not supported
+ * 0x7 - bad parameter (e.g. port out of range)
+ * 0x8 - resource not available
+ * 0x9 - message receipt acknowledgment. Requester should retry
+ * 0x70 - internal error
+ */
+MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);
+
+/* emad_op_tlv_register_id
+ * Register ID of register within register TLV.
+ */
+MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);
+
+/* emad_op_tlv_r
+ * Response bit. Setting to 1 indicates Response, otherwise request.
+ */
+MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);
+
+/* emad_op_tlv_method
+ * EMAD method type.
+ * 0x1 - query
+ * 0x2 - write
+ * 0x3 - send (currently not supported)
+ * 0x4 - event
+ */
+MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);
+
+/* emad_op_tlv_class
+ * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
+ */
+MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
+
+/* emad_op_tlv_tid
+ * EMAD transaction ID. Used for pairing request and response EMADs.
+ */
+MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
+
+/* emad_reg_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x3 (register TLV).
+ */
+MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);
+
+/* emad_reg_tlv_len
+ * Length of the register TLV in u32.
+ */
+MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);
+
+/* emad_end_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x0 (end TLV).
+ */
+MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);
+
+/* emad_end_tlv_len
+ * Length of the end TLV in u32.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
+
+enum mlxsw_core_reg_access_type {
+       MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
+       MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
+};
+
+static inline const char *
+mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
+{
+       switch (type) {
+       case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
+               return "query";
+       case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
+               return "write";
+       }
+       BUG();
+}
+
+static void mlxsw_emad_pack_end_tlv(char *end_tlv)
+{
+       mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
+       mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
+}
+
+static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
+                                   const struct mlxsw_reg_info *reg,
+                                   char *payload)
+{
+       mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
+       mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
+       memcpy(reg_tlv + sizeof(u32), payload, reg->len);
+}
+
+static void mlxsw_emad_pack_op_tlv(char *op_tlv,
+                                  const struct mlxsw_reg_info *reg,
+                                  enum mlxsw_core_reg_access_type type,
+                                  struct mlxsw_core *mlxsw_core)
+{
+       mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
+       mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
+       mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
+       mlxsw_emad_op_tlv_status_set(op_tlv, 0);
+       mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
+       mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
+       if (MLXSW_CORE_REG_ACCESS_TYPE_QUERY == type)
+               mlxsw_emad_op_tlv_method_set(op_tlv,
+                                            MLXSW_EMAD_OP_TLV_METHOD_QUERY);
+       else
+               mlxsw_emad_op_tlv_method_set(op_tlv,
+                                            MLXSW_EMAD_OP_TLV_METHOD_WRITE);
+       mlxsw_emad_op_tlv_class_set(op_tlv,
+                                   MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
+       mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
+}
+
+static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
+{
+       char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);
+
+       mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
+       mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
+       mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
+       mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
+       mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);
+
+       skb_reset_mac_header(skb);
+
+       return 0;
+}
+
+static void mlxsw_emad_construct(struct sk_buff *skb,
+                                const struct mlxsw_reg_info *reg,
+                                char *payload,
+                                enum mlxsw_core_reg_access_type type,
+                                struct mlxsw_core *mlxsw_core)
+{
+       char *buf;
+
+       buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
+       mlxsw_emad_pack_end_tlv(buf);
+
+       buf = skb_push(skb, reg->len + sizeof(u32));
+       mlxsw_emad_pack_reg_tlv(buf, reg, payload);
+
+       buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
+       mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);
+
+       mlxsw_emad_construct_eth_hdr(skb);
+}
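+
+/* The finished frame, front to back: Ethernet header, operation TLV,
+ * register TLV (one header word followed by the register payload) and
+ * end TLV. The pieces are pushed in reverse order above because
+ * skb_push() prepends to the buffer.
+ */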
+
+static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
+{
+       return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
+}
+
+static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
+{
+       return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
+                                     MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
+}
+
+static char *mlxsw_emad_reg_payload(const char *op_tlv)
+{
+       return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
+}
+
+static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
+{
+       char *op_tlv;
+
+       op_tlv = mlxsw_emad_op_tlv(skb);
+       return mlxsw_emad_op_tlv_tid_get(op_tlv);
+}
+
+static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
+{
+       char *op_tlv;
+
+       op_tlv = mlxsw_emad_op_tlv(skb);
+       return (MLXSW_EMAD_OP_TLV_RESPONSE == mlxsw_emad_op_tlv_r_get(op_tlv));
+}
+
+#define MLXSW_EMAD_TIMEOUT_MS 200
+
+static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
+                                struct sk_buff *skb,
+                                const struct mlxsw_tx_info *tx_info)
+{
+       int err;
+       int ret;
+
+       err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
+       if (err) {
+               dev_warn(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
+                        mlxsw_core->emad.tid);
+               dev_kfree_skb(skb);
+               return err;
+       }
+
+       mlxsw_core->emad.trans_active = true;
+       ret = wait_event_timeout(mlxsw_core->emad.wait,
+                                !(mlxsw_core->emad.trans_active),
+                                msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
+       if (!ret) {
+               dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
+                        mlxsw_core->emad.tid);
+               mlxsw_core->emad.trans_active = false;
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
+                                    char *op_tlv)
+{
+       enum mlxsw_emad_op_tlv_status status;
+       u64 tid;
+
+       status = mlxsw_emad_op_tlv_status_get(op_tlv);
+       tid = mlxsw_emad_op_tlv_tid_get(op_tlv);
+
+       switch (status) {
+       case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
+               return 0;
+       case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
+       case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
+               dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
+                        tid, status, mlxsw_emad_op_tlv_status_str(status));
+               return -EAGAIN;
+       case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
+       case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
+       case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
+       case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
+       case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
+       case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
+       case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
+       case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
+       default:
+               dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
+                       tid, status, mlxsw_emad_op_tlv_status_str(status));
+               return -EIO;
+       }
+}
+
+static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
+                                        struct sk_buff *skb)
+{
+       return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
+}
+
+static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
+                              struct sk_buff *skb,
+                              const struct mlxsw_tx_info *tx_info)
+{
+       struct sk_buff *trans_skb;
+       int n_retry;
+       int err;
+
+       n_retry = 0;
+retry:
+       /* We copy the EMAD to a new skb, since we might need
+        * to retransmit it in case of failure.
+        */
+       trans_skb = skb_copy(skb, GFP_KERNEL);
+       if (!trans_skb) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
+       if (!err) {
+               struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;
+
+               err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
+               if (err)
+                       dev_kfree_skb(resp_skb);
+               if (err != -EAGAIN)
+                       goto out;
+       }
+       if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
+               goto retry;
+
+out:
+       dev_kfree_skb(skb);
+       mlxsw_core->emad.tid++;
+       return err;
+}
+
+static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
+                                       void *priv)
+{
+       struct mlxsw_core *mlxsw_core = priv;
+
+       if (mlxsw_emad_is_resp(skb) &&
+           mlxsw_core->emad.trans_active &&
+           mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
+               mlxsw_core->emad.resp_skb = skb;
+               mlxsw_core->emad.trans_active = false;
+               wake_up(&mlxsw_core->emad.wait);
+       } else {
+               dev_kfree_skb(skb);
+       }
+}
+
+static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
+       .func = mlxsw_emad_rx_listener_func,
+       .local_port = MLXSW_PORT_DONT_CARE,
+       .trap_id = MLXSW_TRAP_ID_ETHEMAD,
+};
+
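+/* Route EMAD responses to the CPU: bind the dedicated EMAD trap group
+ * with HTGT, then trap ETHEMAD packets to that group with HPKT.
+ */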
+static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
+{
+       char htgt_pl[MLXSW_REG_HTGT_LEN];
+       char hpkt_pl[MLXSW_REG_HPKT_LEN];
+       int err;
+
+       mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
+       err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+       if (err)
+               return err;
+
+       mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+                           MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+                           MLXSW_TRAP_ID_ETHEMAD);
+       return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+}
+
+static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
+{
+       int err;
+
+       /* Set the upper 32 bits of the transaction ID field to a random
+        * number. This allows us to discard EMADs addressed to other
+        * devices.
+        */
+       get_random_bytes(&mlxsw_core->emad.tid, 4);
+       mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;
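+       /* For example, a random upper half of 0x12345678 gives an
+        * initial tid of 0x1234567800000000; the low 32 bits are then
+        * incremented once per completed transaction.
+        */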
+
+       init_waitqueue_head(&mlxsw_core->emad.wait);
+       mlxsw_core->emad.trans_active = false;
+       mutex_init(&mlxsw_core->emad.lock);
+
+       err = mlxsw_core_rx_listener_register(mlxsw_core,
+                                             &mlxsw_emad_rx_listener,
+                                             mlxsw_core);
+       if (err)
+               return err;
+
+       err = mlxsw_emad_traps_set(mlxsw_core);
+       if (err)
+               goto err_emad_trap_set;
+
+       mlxsw_core->emad.use_emad = true;
+
+       return 0;
+
+err_emad_trap_set:
+       mlxsw_core_rx_listener_unregister(mlxsw_core,
+                                         &mlxsw_emad_rx_listener,
+                                         mlxsw_core);
+       return err;
+}
+
+static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
+{
+       char hpkt_pl[MLXSW_REG_HPKT_LEN];
+
+       mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
+                           MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+                           MLXSW_TRAP_ID_ETHEMAD);
+       mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+
+       mlxsw_core_rx_listener_unregister(mlxsw_core,
+                                         &mlxsw_emad_rx_listener,
+                                         mlxsw_core);
+}
+
+static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
+                                       u16 reg_len)
+{
+       struct sk_buff *skb;
+       u16 emad_len;
+
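+       /* Frame layout: bus TX header, Ethernet header, OP TLV, REG
+        * TLV and END TLV; the lone u32 accounts for the REG TLV
+        * header that precedes the register payload.
+        */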
+       emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
+                   (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
+                   sizeof(u32) + mlxsw_core->driver->txhdr_len);
+       if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
+               return NULL;
+
+       skb = netdev_alloc_skb(NULL, emad_len);
+       if (!skb)
+               return NULL;
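+       /* Zero the whole frame and reserve its full length; the EMAD is
+        * later built back-to-front with skb_push(), END TLV first and
+        * Ethernet header last.
+        */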
+       memset(skb->data, 0, emad_len);
+       skb_reserve(skb, emad_len);
+
+       return skb;
+}
+
+/*****************
+ * Core functions
+ *****************/
+
+static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
+{
+       struct mlxsw_core *mlxsw_core = file->private;
+       struct mlxsw_core_pcpu_stats *p;
+       u64 rx_packets, rx_bytes;
+       u64 tmp_rx_packets, tmp_rx_bytes;
+       u32 rx_dropped, rx_invalid;
+       unsigned int start;
+       int i;
+       int j;
+       static const char hdr[] =
+               "     NUM   RX_PACKETS     RX_BYTES RX_DROPPED\n";
+
+       seq_puts(file, hdr);
+       for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
+               rx_packets = 0;
+               rx_bytes = 0;
+               rx_dropped = 0;
+               for_each_possible_cpu(j) {
+                       p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+                       do {
+                               start = u64_stats_fetch_begin(&p->syncp);
+                               tmp_rx_packets = p->trap_rx_packets[i];
+                               tmp_rx_bytes = p->trap_rx_bytes[i];
+                       } while (u64_stats_fetch_retry(&p->syncp, start));
+
+                       rx_packets += tmp_rx_packets;
+                       rx_bytes += tmp_rx_bytes;
+                       rx_dropped += p->trap_rx_dropped[i];
+               }
+               seq_printf(file, "trap %3d %12llu %12llu %10u\n",
+                          i, rx_packets, rx_bytes, rx_dropped);
+       }
+       rx_invalid = 0;
+       for_each_possible_cpu(j) {
+               p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+               rx_invalid += p->trap_rx_invalid;
+       }
+       seq_printf(file, "trap INV                           %10u\n",
+                  rx_invalid);
+
+       for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
+               rx_packets = 0;
+               rx_bytes = 0;
+               rx_dropped = 0;
+               for_each_possible_cpu(j) {
+                       p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+                       do {
+                               start = u64_stats_fetch_begin(&p->syncp);
+                               tmp_rx_packets = p->port_rx_packets[i];
+                               tmp_rx_bytes = p->port_rx_bytes[i];
+                       } while (u64_stats_fetch_retry(&p->syncp, start));
+
+                       rx_packets += tmp_rx_packets;
+                       rx_bytes += tmp_rx_bytes;
+                       rx_dropped += p->port_rx_dropped[i];
+               }
+               seq_printf(file, "port %3d %12llu %12llu %10u\n",
+                          i, rx_packets, rx_bytes, rx_dropped);
+       }
+       rx_invalid = 0;
+       for_each_possible_cpu(j) {
+               p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+               rx_invalid += p->port_rx_invalid;
+       }
+       seq_printf(file, "port INV                           %10u\n",
+                  rx_invalid);
+       return 0;
+}
+
+static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
+{
+       struct mlxsw_core *mlxsw_core = inode->i_private;
+
+       return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
+}
+
+static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
+       .owner = THIS_MODULE,
+       .open = mlxsw_core_rx_stats_dbg_open,
+       .release = single_release,
+       .read = seq_read,
+       .llseek = seq_lseek
+};
+
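+/* Hex-dump a buffer to the debug log, 16 bytes per row, trimming
+ * trailing all-zero words (at least one row is always printed).
+ */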
+static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
+                                   const char *buf, size_t size)
+{
+       __be32 *m = (__be32 *) buf;
+       int i;
+       int count = size / sizeof(__be32);
+
+       for (i = count - 1; i >= 0; i--)
+               if (m[i])
+                       break;
+       i++;
+       count = i ? i : 1;
+       for (i = 0; i < count; i += 4)
+               dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
+                       i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
+                       be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
+}
+
+int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
+{
+       spin_lock(&mlxsw_core_driver_list_lock);
+       list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
+       spin_unlock(&mlxsw_core_driver_list_lock);
+       return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_driver_register);
+
+void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
+{
+       spin_lock(&mlxsw_core_driver_list_lock);
+       list_del(&mlxsw_driver->list);
+       spin_unlock(&mlxsw_core_driver_list_lock);
+}
+EXPORT_SYMBOL(mlxsw_core_driver_unregister);
+
+static struct mlxsw_driver *__driver_find(const char *kind)
+{
+       struct mlxsw_driver *mlxsw_driver;
+
+       list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
+               if (strcmp(mlxsw_driver->kind, kind) == 0)
+                       return mlxsw_driver;
+       }
+       return NULL;
+}
+
+static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
+{
+       struct mlxsw_driver *mlxsw_driver;
+
+       spin_lock(&mlxsw_core_driver_list_lock);
+       mlxsw_driver = __driver_find(kind);
+       if (!mlxsw_driver) {
+               spin_unlock(&mlxsw_core_driver_list_lock);
+               request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
+               spin_lock(&mlxsw_core_driver_list_lock);
+               mlxsw_driver = __driver_find(kind);
+       }
+       if (mlxsw_driver) {
+               if (!try_module_get(mlxsw_driver->owner))
+                       mlxsw_driver = NULL;
+       }
+
+       spin_unlock(&mlxsw_core_driver_list_lock);
+       return mlxsw_driver;
+}
+
+static void mlxsw_core_driver_put(const char *kind)
+{
+       struct mlxsw_driver *mlxsw_driver;
+
+       spin_lock(&mlxsw_core_driver_list_lock);
+       mlxsw_driver = __driver_find(kind);
+       spin_unlock(&mlxsw_core_driver_list_lock);
+       if (!mlxsw_driver)
+               return;
+       module_put(mlxsw_driver->owner);
+}
+
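+/* Expose per-device debug entries; with debugfs mounted in the usual
+ * place these appear under
+ * /sys/kernel/debug/<driver_name>/<device_name>/{rx_stats,vsd,psid}.
+ */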
+static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
+{
+       const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
+
+       mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
+                                                mlxsw_core_dbg_root);
+       if (!mlxsw_core->dbg_dir)
+               return -ENOMEM;
+       debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
+                           mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
+       mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
+       mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
+       debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
+                           &mlxsw_core->dbg.vsd_blob);
+       mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
+       mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
+       debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
+                           &mlxsw_core->dbg.psid_blob);
+       return 0;
+}
+
+static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
+{
+       debugfs_remove_recursive(mlxsw_core->dbg_dir);
+}
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+                                  const struct mlxsw_bus *mlxsw_bus,
+                                  void *bus_priv)
+{
+       const char *device_kind = mlxsw_bus_info->device_kind;
+       struct mlxsw_core *mlxsw_core;
+       struct mlxsw_driver *mlxsw_driver;
+       size_t alloc_size;
+       int err;
+
+       mlxsw_driver = mlxsw_core_driver_get(device_kind);
+       if (!mlxsw_driver)
+               return -EINVAL;
+       alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
+       mlxsw_core = kzalloc(alloc_size, GFP_KERNEL);
+       if (!mlxsw_core) {
+               err = -ENOMEM;
+               goto err_core_alloc;
+       }
+
+       INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
+       INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
+       mlxsw_core->driver = mlxsw_driver;
+       mlxsw_core->bus = mlxsw_bus;
+       mlxsw_core->bus_priv = bus_priv;
+       mlxsw_core->bus_info = mlxsw_bus_info;
+
+       mlxsw_core->pcpu_stats =
+               netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
+       if (!mlxsw_core->pcpu_stats) {
+               err = -ENOMEM;
+               goto err_alloc_stats;
+       }
+
+       err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
+       if (err)
+               goto err_bus_init;
+
+       err = mlxsw_emad_init(mlxsw_core);
+       if (err)
+               goto err_emad_init;
+
+       err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
+                                mlxsw_bus_info);
+       if (err)
+               goto err_driver_init;
+
+       err = mlxsw_core_debugfs_init(mlxsw_core);
+       if (err)
+               goto err_debugfs_init;
+
+       return 0;
+
+err_debugfs_init:
+       mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+err_driver_init:
+       mlxsw_emad_fini(mlxsw_core);
+err_emad_init:
+       mlxsw_bus->fini(bus_priv);
+err_bus_init:
+       free_percpu(mlxsw_core->pcpu_stats);
+err_alloc_stats:
+       kfree(mlxsw_core);
+err_core_alloc:
+       mlxsw_core_driver_put(device_kind);
+       return err;
+}
+EXPORT_SYMBOL(mlxsw_core_bus_device_register);
+
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
+{
+       const char *device_kind = mlxsw_core->bus_info->device_kind;
+
+       mlxsw_core_debugfs_fini(mlxsw_core);
+       mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+       mlxsw_emad_fini(mlxsw_core);
+       mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+       free_percpu(mlxsw_core->pcpu_stats);
+       kfree(mlxsw_core);
+       mlxsw_core_driver_put(device_kind);
+}
+EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
+
+static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
+{
+       return container_of(driver_priv, struct mlxsw_core, driver_priv);
+}
+
+int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+                           const struct mlxsw_tx_info *tx_info)
+{
+       struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
+
+       return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
+                                            tx_info);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_transmit);
+
+static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
+                                  const struct mlxsw_rx_listener *rxl_b)
+{
+       return (rxl_a->func == rxl_b->func &&
+               rxl_a->local_port == rxl_b->local_port &&
+               rxl_a->trap_id == rxl_b->trap_id);
+}
+
+static struct mlxsw_rx_listener_item *
+__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
+                       const struct mlxsw_rx_listener *rxl,
+                       void *priv)
+{
+       struct mlxsw_rx_listener_item *rxl_item;
+
+       list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
+               if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
+                   rxl_item->priv == priv)
+                       return rxl_item;
+       }
+       return NULL;
+}
+
+int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
+                                   const struct mlxsw_rx_listener *rxl,
+                                   void *priv)
+{
+       struct mlxsw_rx_listener_item *rxl_item;
+
+       rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+       if (rxl_item)
+               return -EEXIST;
+       rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
+       if (!rxl_item)
+               return -ENOMEM;
+       rxl_item->rxl = *rxl;
+       rxl_item->priv = priv;
+
+       list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
+       return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
+
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+                                      const struct mlxsw_rx_listener *rxl,
+                                      void *priv)
+{
+       struct mlxsw_rx_listener_item *rxl_item;
+
+       rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+       if (!rxl_item)
+               return;
+       list_del_rcu(&rxl_item->list);
+       synchronize_rcu();
+       kfree(rxl_item);
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
+
+static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
+                                          void *priv)
+{
+       struct mlxsw_event_listener_item *event_listener_item = priv;
+       struct mlxsw_reg_info reg;
+       char *payload;
+       char *op_tlv = mlxsw_emad_op_tlv(skb);
+       char *reg_tlv = mlxsw_emad_reg_tlv(skb);
+
+       reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
+       reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
+       payload = mlxsw_emad_reg_payload(op_tlv);
+       event_listener_item->el.func(&reg, payload, event_listener_item->priv);
+       dev_kfree_skb(skb);
+}
+
+static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
+                                     const struct mlxsw_event_listener *el_b)
+{
+       return (el_a->func == el_b->func &&
+               el_a->trap_id == el_b->trap_id);
+}
+
+static struct mlxsw_event_listener_item *
+__find_event_listener_item(struct mlxsw_core *mlxsw_core,
+                          const struct mlxsw_event_listener *el,
+                          void *priv)
+{
+       struct mlxsw_event_listener_item *el_item;
+
+       list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
+               if (__is_event_listener_equal(&el_item->el, el) &&
+                   el_item->priv == priv)
+                       return el_item;
+       }
+       return NULL;
+}
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+                                      const struct mlxsw_event_listener *el,
+                                      void *priv)
+{
+       int err;
+       struct mlxsw_event_listener_item *el_item;
+       const struct mlxsw_rx_listener rxl = {
+               .func = mlxsw_core_event_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = el->trap_id,
+       };
+
+       el_item = __find_event_listener_item(mlxsw_core, el, priv);
+       if (el_item)
+               return -EEXIST;
+       el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
+       if (!el_item)
+               return -ENOMEM;
+       el_item->el = *el;
+       el_item->priv = priv;
+
+       err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
+       if (err)
+               goto err_rx_listener_register;
+
+       /* Save the item only now that an RX listener was successfully
+        * registered for it.
+        */
+       list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);
+
+       return 0;
+
+err_rx_listener_register:
+       kfree(el_item);
+       return err;
+}
+EXPORT_SYMBOL(mlxsw_core_event_listener_register);
+
+void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
+                                         const struct mlxsw_event_listener *el,
+                                         void *priv)
+{
+       struct mlxsw_event_listener_item *el_item;
+       const struct mlxsw_rx_listener rxl = {
+               .func = mlxsw_core_event_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = el->trap_id,
+       };
+
+       el_item = __find_event_listener_item(mlxsw_core, el, priv);
+       if (!el_item)
+               return;
+       mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
+       list_del(&el_item->list);
+       kfree(el_item);
+}
+EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
+
+static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
+                                     const struct mlxsw_reg_info *reg,
+                                     char *payload,
+                                     enum mlxsw_core_reg_access_type type)
+{
+       int err;
+       char *op_tlv;
+       struct sk_buff *skb;
+       struct mlxsw_tx_info tx_info = {
+               .local_port = MLXSW_PORT_CPU_PORT,
+               .is_emad = true,
+       };
+
+       skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
+       if (!skb)
+               return -ENOMEM;
+
+       mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
+       mlxsw_core->driver->txhdr_construct(skb, &tx_info);
+
+       dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
+               mlxsw_core->emad.tid);
+       mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);
+
+       err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
+       if (!err) {
+               op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
+               memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
+                      reg->len);
+
+               dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
+                       mlxsw_core->emad.tid - 1);
+               mlxsw_core_buf_dump_dbg(mlxsw_core,
+                                       mlxsw_core->emad.resp_skb->data,
+                                       mlxsw_core->emad.resp_skb->len);
+
+               dev_kfree_skb(mlxsw_core->emad.resp_skb);
+       }
+
+       return err;
+}
+
+static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
+                                    const struct mlxsw_reg_info *reg,
+                                    char *payload,
+                                    enum mlxsw_core_reg_access_type type)
+{
+       int err, n_retry;
+       char *in_mbox, *out_mbox, *tmp;
+
+       in_mbox = mlxsw_cmd_mbox_alloc();
+       if (!in_mbox)
+               return -ENOMEM;
+
+       out_mbox = mlxsw_cmd_mbox_alloc();
+       if (!out_mbox) {
+               err = -ENOMEM;
+               goto free_in_mbox;
+       }
+
+       mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
+       tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
+       mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
+
+       n_retry = 0;
+retry:
+       err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
+       if (!err) {
+               err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
+               if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
+                       goto retry;
+       }
+
+       if (!err)
+               memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
+                      reg->len);
+
+       mlxsw_core->emad.tid++;
+       mlxsw_cmd_mbox_free(out_mbox);
+free_in_mbox:
+       mlxsw_cmd_mbox_free(in_mbox);
+       return err;
+}
+
+static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
+                                const struct mlxsw_reg_info *reg,
+                                char *payload,
+                                enum mlxsw_core_reg_access_type type)
+{
+       u64 cur_tid;
+       int err;
+
+       if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
+               dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
+                       reg->id, mlxsw_reg_id_str(reg->id),
+                       mlxsw_core_reg_access_type_str(type));
+               return -EINTR;
+       }
+
+       cur_tid = mlxsw_core->emad.tid;
+       dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
+               cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
+               mlxsw_core_reg_access_type_str(type));
+
+       /* During initialization EMAD interface is not available to us,
+        * so we default to command interface. We switch to EMAD interface
+        * after setting the appropriate traps.
+        */
+       if (!mlxsw_core->emad.use_emad)
+               err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
+                                               payload, type);
+       else
+               err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
+                                                payload, type);
+
+       if (err)
+               dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
+                       cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
+                       mlxsw_core_reg_access_type_str(type));
+
+       mutex_unlock(&mlxsw_core->emad.lock);
+       return err;
+}
+
+int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
+                   const struct mlxsw_reg_info *reg, char *payload)
+{
+       return mlxsw_core_reg_access(mlxsw_core, reg, payload,
+                                    MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
+}
+EXPORT_SYMBOL(mlxsw_reg_query);
+
+int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
+                   const struct mlxsw_reg_info *reg, char *payload)
+{
+       return mlxsw_core_reg_access(mlxsw_core, reg, payload,
+                                    MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
+}
+EXPORT_SYMBOL(mlxsw_reg_write);
+
+void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+                           struct mlxsw_rx_info *rx_info)
+{
+       struct mlxsw_rx_listener_item *rxl_item;
+       const struct mlxsw_rx_listener *rxl;
+       struct mlxsw_core_pcpu_stats *pcpu_stats;
+       u8 local_port = rx_info->sys_port;
+       bool found = false;
+
+       dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: sys_port = %d, trap_id = 0x%x\n",
+                           __func__, rx_info->sys_port, rx_info->trap_id);
+
+       if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
+           (local_port >= MLXSW_PORT_MAX_PORTS))
+               goto drop;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
+               rxl = &rxl_item->rxl;
+               if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
+                    rxl->local_port == local_port) &&
+                   rxl->trap_id == rx_info->trap_id) {
+                       found = true;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+       if (!found)
+               goto drop;
+
+       pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
+       u64_stats_update_begin(&pcpu_stats->syncp);
+       pcpu_stats->port_rx_packets[local_port]++;
+       pcpu_stats->port_rx_bytes[local_port] += skb->len;
+       pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
+       pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
+       u64_stats_update_end(&pcpu_stats->syncp);
+
+       rxl->func(skb, local_port, rxl_item->priv);
+       return;
+
+drop:
+       if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
+               this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
+       else
+               this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
+       if (local_port >= MLXSW_PORT_MAX_PORTS)
+               this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
+       else
+               this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
+       dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_receive);
+
+int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
+                  u32 in_mod, bool out_mbox_direct,
+                  char *in_mbox, size_t in_mbox_size,
+                  char *out_mbox, size_t out_mbox_size)
+{
+       u8 status;
+       int err;
+
+       BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
+       if (!mlxsw_core->bus->cmd_exec)
+               return -EOPNOTSUPP;
+
+       dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
+               opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
+       if (in_mbox) {
+               dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
+               mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
+       }
+
+       err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
+                                       opcode_mod, in_mod, out_mbox_direct,
+                                       in_mbox, in_mbox_size,
+                                       out_mbox, out_mbox_size, &status);
+
+       if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
+               dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
+                       opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
+                       in_mod, status, mlxsw_cmd_status_str(status));
+       } else if (err == -ETIMEDOUT) {
+               dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
+                       opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
+                       in_mod);
+       }
+
+       if (!err && out_mbox) {
+               dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
+               mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
+       }
+       return err;
+}
+EXPORT_SYMBOL(mlxsw_cmd_exec);
+
+static int __init mlxsw_core_module_init(void)
+{
+       mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
+       if (!mlxsw_core_dbg_root)
+               return -ENOMEM;
+       return 0;
+}
+
+static void __exit mlxsw_core_module_exit(void)
+{
+       debugfs_remove_recursive(mlxsw_core_dbg_root);
+}
+
+module_init(mlxsw_core_module_init);
+module_exit(mlxsw_core_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch device core driver");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
new file mode 100644
index 0000000..2280b31
--- /dev/null
@@ -0,0 +1,202 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CORE_H
+#define _MLXSW_CORE_H
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#include "trap.h"
+#include "reg.h"
+
+#include "cmd.h"
+
+#define MLXSW_MODULE_ALIAS_PREFIX "mlxsw-driver-"
+#define MODULE_MLXSW_DRIVER_ALIAS(kind)        \
+       MODULE_ALIAS(MLXSW_MODULE_ALIAS_PREFIX kind)
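+
+/* e.g. MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2) expands
+ * to MODULE_ALIAS("mlxsw-driver-switchx2"), which lets
+ * mlxsw_core_driver_get() autoload the module via request_module().
+ */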
+
+#define MLXSW_DEVICE_KIND_SWITCHX2 "switchx2"
+
+struct mlxsw_core;
+struct mlxsw_driver;
+struct mlxsw_bus;
+struct mlxsw_bus_info;
+
+int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
+void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+                                  const struct mlxsw_bus *mlxsw_bus,
+                                  void *bus_priv);
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core);
+
+struct mlxsw_tx_info {
+       u8 local_port;
+       bool is_emad;
+};
+
+int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+                           const struct mlxsw_tx_info *tx_info);
+
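+/* An RX listener is invoked for packets whose trap_id matches; a
+ * local_port of MLXSW_PORT_DONT_CARE matches packets from any port.
+ */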
+struct mlxsw_rx_listener {
+       void (*func)(struct sk_buff *skb, u8 local_port, void *priv);
+       u8 local_port;
+       u16 trap_id;
+};
+
+struct mlxsw_event_listener {
+       void (*func)(const struct mlxsw_reg_info *reg,
+                    char *payload, void *priv);
+       enum mlxsw_event_trap_id trap_id;
+};
+
+int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
+                                   const struct mlxsw_rx_listener *rxl,
+                                   void *priv);
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+                                      const struct mlxsw_rx_listener *rxl,
+                                      void *priv);
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+                                      const struct mlxsw_event_listener *el,
+                                      void *priv);
+void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
+                                         const struct mlxsw_event_listener *el,
+                                         void *priv);
+
+int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
+                   const struct mlxsw_reg_info *reg, char *payload);
+int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
+                   const struct mlxsw_reg_info *reg, char *payload);
+
+struct mlxsw_rx_info {
+       u16 sys_port;
+       int trap_id;
+};
+
+void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+                           struct mlxsw_rx_info *rx_info);
+
+#define MLXSW_CONFIG_PROFILE_SWID_COUNT 8
+
+struct mlxsw_swid_config {
+       u8      used_type:1,
+               used_properties:1;
+       u8      type;
+       u8      properties;
+};
+
+struct mlxsw_config_profile {
+       u16     used_max_vepa_channels:1,
+               used_max_lag:1,
+               used_max_port_per_lag:1,
+               used_max_mid:1,
+               used_max_pgt:1,
+               used_max_system_port:1,
+               used_max_vlan_groups:1,
+               used_max_regions:1,
+               used_flood_tables:1,
+               used_flood_mode:1,
+               used_max_ib_mc:1,
+               used_max_pkey:1,
+               used_ar_sec:1,
+               used_adaptive_routing_group_cap:1;
+       u8      max_vepa_channels;
+       u16     max_lag;
+       u16     max_port_per_lag;
+       u16     max_mid;
+       u16     max_pgt;
+       u16     max_system_port;
+       u16     max_vlan_groups;
+       u16     max_regions;
+       u8      max_flood_tables;
+       u8      max_vid_flood_tables;
+       u8      flood_mode;
+       u16     max_ib_mc;
+       u16     max_pkey;
+       u8      ar_sec;
+       u16     adaptive_routing_group_cap;
+       u8      arn;
+       struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
+};
+
+struct mlxsw_driver {
+       struct list_head list;
+       const char *kind;
+       struct module *owner;
+       size_t priv_size;
+       int (*init)(void *driver_priv, struct mlxsw_core *mlxsw_core,
+                   const struct mlxsw_bus_info *mlxsw_bus_info);
+       void (*fini)(void *driver_priv);
+       void (*txhdr_construct)(struct sk_buff *skb,
+                               const struct mlxsw_tx_info *tx_info);
+       u8 txhdr_len;
+       const struct mlxsw_config_profile *profile;
+};
+
+struct mlxsw_bus {
+       const char *kind;
+       int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core,
+                   const struct mlxsw_config_profile *profile);
+       void (*fini)(void *bus_priv);
+       int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
+                           const struct mlxsw_tx_info *tx_info);
+       int (*cmd_exec)(void *bus_priv, u16 opcode, u8 opcode_mod,
+                       u32 in_mod, bool out_mbox_direct,
+                       char *in_mbox, size_t in_mbox_size,
+                       char *out_mbox, size_t out_mbox_size,
+                       u8 *p_status);
+};
+
+struct mlxsw_bus_info {
+       const char *device_kind;
+       const char *device_name;
+       struct device *dev;
+       struct {
+               u16 major;
+               u16 minor;
+               u16 subminor;
+       } fw_rev;
+       u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN];
+       u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
+};
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/emad.h b/drivers/net/ethernet/mellanox/mlxsw/emad.h
new file mode 100644
index 0000000..97b6bb5
--- /dev/null
@@ -0,0 +1,127 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/emad.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_EMAD_H
+#define _MLXSW_EMAD_H
+
+#define MLXSW_EMAD_MAX_FRAME_LEN 1518  /* Length in u8 */
+#define MLXSW_EMAD_MAX_RETRY 5
+
+/* EMAD Ethernet header */
+#define MLXSW_EMAD_ETH_HDR_LEN 0x10    /* Length in u8 */
+#define MLXSW_EMAD_EH_DMAC "\x01\x02\xc9\x00\x00\x01"
+#define MLXSW_EMAD_EH_SMAC "\x00\x02\xc9\x01\x02\x03"
+#define MLXSW_EMAD_EH_ETHERTYPE 0x8932
+#define MLXSW_EMAD_EH_MLX_PROTO 0
+#define MLXSW_EMAD_EH_PROTO_VERSION 0
+
+/* EMAD TLV Types */
+enum {
+       MLXSW_EMAD_TLV_TYPE_END,
+       MLXSW_EMAD_TLV_TYPE_OP,
+       MLXSW_EMAD_TLV_TYPE_DR,
+       MLXSW_EMAD_TLV_TYPE_REG,
+       MLXSW_EMAD_TLV_TYPE_USERDATA,
+       MLXSW_EMAD_TLV_TYPE_OOBETH,
+};
+
+/* OP TLV */
+#define MLXSW_EMAD_OP_TLV_LEN 4                /* Length in u32 */
+
+enum {
+       MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS = 1,
+       MLXSW_EMAD_OP_TLV_CLASS_IPC = 2,
+};
+
+enum mlxsw_emad_op_tlv_status {
+       MLXSW_EMAD_OP_TLV_STATUS_SUCCESS,
+       MLXSW_EMAD_OP_TLV_STATUS_BUSY,
+       MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED,
+       MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV,
+       MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED,
+       MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED,
+       MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED,
+       MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER,
+       MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE,
+       MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK,
+       MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR = 0x70,
+};
+
+static inline const char *mlxsw_emad_op_tlv_status_str(u8 status)
+{
+       switch (status) {
+       case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
+               return "operation performed";
+       case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
+               return "device is busy";
+       case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
+               return "version not supported";
+       case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
+               return "unknown TLV";
+       case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
+               return "register not supported";
+       case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
+               return "class not supported";
+       case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
+               return "method not supported";
+       case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
+               return "bad parameter";
+       case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
+               return "resource not available";
+       case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
+               return "acknowledged. retransmit";
+       case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
+               return "internal error";
+       default:
+               return "*UNKNOWN*";
+       }
+}
+
+enum {
+       MLXSW_EMAD_OP_TLV_REQUEST,
+       MLXSW_EMAD_OP_TLV_RESPONSE
+};
+
+enum {
+       MLXSW_EMAD_OP_TLV_METHOD_QUERY = 1,
+       MLXSW_EMAD_OP_TLV_METHOD_WRITE = 2,
+       MLXSW_EMAD_OP_TLV_METHOD_SEND = 3,
+       MLXSW_EMAD_OP_TLV_METHOD_EVENT = 5,
+};
+
+/* END TLV */
+#define MLXSW_EMAD_END_TLV_LEN 1       /* Length in u32 */
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
new file mode 100644
index 0000000..4d0ac88
--- /dev/null
@@ -0,0 +1,405 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/item.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_ITEM_H
+#define _MLXSW_ITEM_H
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+
+struct mlxsw_item {
+       unsigned short  offset;         /* bytes in container */
+       unsigned short  step;           /* step in bytes for indexed items */
+       unsigned short  in_step_offset; /* offset within one step */
+       unsigned char   shift;          /* shift in bits */
+       unsigned char   element_size;   /* size of element in bit array */
+       bool            no_real_shift;
+       union {
+               unsigned char   bits;
+               unsigned short  bytes;
+       } size;
+       const char      *name;
+};
+
+static inline unsigned int
+__mlxsw_item_offset(struct mlxsw_item *item, unsigned short index,
+                   size_t typesize)
+{
+       BUG_ON(index && !item->step);
+       if (item->offset % typesize != 0 ||
+           item->step % typesize != 0 ||
+           item->in_step_offset % typesize != 0) {
+               pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%zx)\n",
+                      item->name, item->offset, item->step,
+                      item->in_step_offset, typesize);
+               BUG();
+       }
+
+       return ((item->offset + item->step * index + item->in_step_offset) /
+               typesize);
+}
+
+static inline u16 __mlxsw_item_get16(char *buf, struct mlxsw_item *item,
+                                    unsigned short index)
+{
+       unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16));
+       __be16 *b = (__be16 *) buf;
+       u16 tmp;
+
+       tmp = be16_to_cpu(b[offset]);
+       tmp >>= item->shift;
+       tmp &= GENMASK(item->size.bits - 1, 0);
+       if (item->no_real_shift)
+               tmp <<= item->shift;
+       return tmp;
+}
+
+static inline void __mlxsw_item_set16(char *buf, struct mlxsw_item *item,
+                                     unsigned short index, u16 val)
+{
+       unsigned int offset = __mlxsw_item_offset(item, index,
+                                                 sizeof(u16));
+       __be16 *b = (__be16 *) buf;
+       u16 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+       u16 tmp;
+
+       if (!item->no_real_shift)
+               val <<= item->shift;
+       val &= mask;
+       tmp = be16_to_cpu(b[offset]);
+       tmp &= ~mask;
+       tmp |= val;
+       b[offset] = cpu_to_be16(tmp);
+}
+
+static inline u32 __mlxsw_item_get32(char *buf, struct mlxsw_item *item,
+                                    unsigned short index)
+{
+       unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32));
+       __be32 *b = (__be32 *) buf;
+       u32 tmp;
+
+       tmp = be32_to_cpu(b[offset]);
+       tmp >>= item->shift;
+       tmp &= GENMASK(item->size.bits - 1, 0);
+       if (item->no_real_shift)
+               tmp <<= item->shift;
+       return tmp;
+}
+
+static inline void __mlxsw_item_set32(char *buf, struct mlxsw_item *item,
+                                     unsigned short index, u32 val)
+{
+       unsigned int offset = __mlxsw_item_offset(item, index,
+                                                 sizeof(u32));
+       __be32 *b = (__be32 *) buf;
+       u32 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+       u32 tmp;
+
+       if (!item->no_real_shift)
+               val <<= item->shift;
+       val &= mask;
+       tmp = be32_to_cpu(b[offset]);
+       tmp &= ~mask;
+       tmp |= val;
+       b[offset] = cpu_to_be32(tmp);
+}
+
+static inline u64 __mlxsw_item_get64(char *buf, struct mlxsw_item *item,
+                                    unsigned short index)
+{
+       unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
+       __be64 *b = (__be64 *) buf;
+       u64 tmp;
+
+       tmp = be64_to_cpu(b[offset]);
+       tmp >>= item->shift;
+       tmp &= GENMASK_ULL(item->size.bits - 1, 0);
+       if (item->no_real_shift)
+               tmp <<= item->shift;
+       return tmp;
+}
+
+static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item,
+                                     unsigned short index, u64 val)
+{
+       unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
+       __be64 *b = (__be64 *) buf;
+       u64 mask = GENMASK_ULL(item->size.bits - 1, 0) << item->shift;
+       u64 tmp;
+
+       if (!item->no_real_shift)
+               val <<= item->shift;
+       val &= mask;
+       tmp = be64_to_cpu(b[offset]);
+       tmp &= ~mask;
+       tmp |= val;
+       b[offset] = cpu_to_be64(tmp);
+}
+
+static inline void __mlxsw_item_memcpy_from(char *buf, char *dst,
+                                           struct mlxsw_item *item)
+{
+       memcpy(dst, &buf[item->offset], item->size.bytes);
+}
+
+static inline void __mlxsw_item_memcpy_to(char *buf, char *src,
+                                         struct mlxsw_item *item)
+{
+       memcpy(&buf[item->offset], src, item->size.bytes);
+}
+
+static inline u16
+__mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift)
+{
+       u16 max_index, be_index;
+       u16 offset;             /* byte offset inside the array */
+
+       BUG_ON(index && !item->element_size);
+       if (item->offset % sizeof(u32) != 0 ||
+           BITS_PER_BYTE % item->element_size != 0) {
+               pr_err("mlxsw: item bug (name=%s,offset=%x,element_size=%x)\n",
+                      item->name, item->offset, item->element_size);
+               BUG();
+       }
+
+       max_index = (item->size.bytes << 3) / item->element_size - 1;
+       be_index = max_index - index;
+       offset = be_index * item->element_size >> 3;
+       *shift = index % (BITS_PER_BYTE / item->element_size) *
+                item->element_size;
+
+       return item->offset + offset;
+}
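+
+/* Elements of a bit array are laid out big-endian: with size.bytes = 2
+ * and element_size = 2 there are 8 elements, and index 0 maps to the
+ * least significant two bits of the last byte (offset 1, shift 0).
+ */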
+
+static inline u8 __mlxsw_item_bit_array_get(char *buf, struct mlxsw_item *item,
+                                           u16 index)
+{
+       u8 shift, tmp;
+       u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
+
+       tmp = buf[offset];
+       tmp >>= shift;
+       tmp &= GENMASK(item->element_size - 1, 0);
+       return tmp;
+}
+
+static inline void __mlxsw_item_bit_array_set(char *buf, struct mlxsw_item *item,
+                                             u16 index, u8 val)
+{
+       u8 shift, tmp;
+       u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
+       u8 mask = GENMASK(item->element_size - 1, 0) << shift;
+
+       val <<= shift;
+       val &= mask;
+       tmp = buf[offset];
+       tmp &= ~mask;
+       tmp |= val;
+       buf[offset] = tmp;
+}
+
+#define __ITEM_NAME(_type, _cname, _iname)                                     \
+       mlxsw_##_type##_##_cname##_##_iname##_item
+
+/* _type: cmd_mbox, reg, etc.
+ * _cname: container name (e.g. command name, register name)
+ * _iname: item name within the container
+ */
+
+#define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits)                \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {                        \
+       .offset = _offset,                                                      \
+       .shift = _shift,                                                        \
+       .size = {.bits = _sizebits,},                                           \
+       .name = #_type "_" #_cname "_" #_iname,                                 \
+};                                                                             \
+static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf)         \
+{                                                                              \
+       return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+}                                                                              \
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\
+{                                                                              \
+       __mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);   \
+}
+
+#define MLXSW_ITEM16_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits,        \
+                            _step, _instepoffset, _norealshift)                \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {                        \
+       .offset = _offset,                                                      \
+       .step = _step,                                                          \
+       .in_step_offset = _instepoffset,                                        \
+       .shift = _shift,                                                        \
+       .no_real_shift = _norealshift,                                          \
+       .size = {.bits = _sizebits,},                                           \
+       .name = #_type "_" #_cname "_" #_iname,                                 \
+};                                                                             \
+static inline u16                                                              \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index)     \
+{                                                                              \
+       return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname),     \
+                                 index);                                       \
+}                                                                              \
+static inline void                                                             \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,     \
+                                         u16 val)                              \
+{                                                                              \
+       __mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname),            \
+                          index, val);                                         \
+}
+
+#define MLXSW_ITEM32(_type, _cname, _iname, _offset, _shift, _sizebits)                \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {                        \
+       .offset = _offset,                                                      \
+       .shift = _shift,                                                        \
+       .size = {.bits = _sizebits,},                                           \
+       .name = #_type "_" #_cname "_" #_iname,                                 \
+};                                                                             \
+static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf)         \
+{                                                                              \
+       return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+}                                                                              \
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\
+{                                                                              \
+       __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);   \
+}
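+
+/* Example with a hypothetical field layout: MLXSW_ITEM32(reg, hpkt,
+ * action, 0x00, 20, 3) would describe a 3-bit field at bits 22:20 of
+ * the register's first word and emit mlxsw_reg_hpkt_action_get()/_set().
+ */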
+
+#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits,        \
+                            _step, _instepoffset, _norealshift)                \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {                        \
+       .offset = _offset,                                                      \
+       .step = _step,                                                          \
+       .in_step_offset = _instepoffset,                                        \
+       .shift = _shift,                                                        \
+       .no_real_shift = _norealshift,                                          \
+       .size = {.bits = _sizebits,},                                           \
+       .name = #_type "_" #_cname "_" #_iname,                                 \
+};                                                                             \
+static inline u32                                                              \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index)     \
+{                                                                              \
+       return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname),     \
+                                 index);                                       \
+}                                                                              \
+static inline void                                                             \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,     \
+                                         u32 val)                              \
+{                                                                              \
+       __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname),            \
+                          index, val);                                         \
+}
+
+#define MLXSW_ITEM64(_type, _cname, _iname, _offset, _shift, _sizebits)                \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {                        \
+       .offset = _offset,                                                      \
+       .shift = _shift,                                                        \
+       .size = {.bits = _sizebits,},                                           \
+       .name = #_type "_" #_cname "_" #_iname,                                 \
+};                                                                             \
+static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf)         \
+{                                                                              \
+       return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+}                                                                              \
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\
+{                                                                              \
+       __mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);   \
+}
+
+#define MLXSW_ITEM64_INDEXED(_type, _cname, _iname, _offset, _shift,           \
+                            _sizebits, _step, _instepoffset, _norealshift)     \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {                        \
+       .offset = _offset,                                                      \
+       .step = _step,                                                          \
+       .in_step_offset = _instepoffset,                                        \
+       .shift = _shift,                                                        \
+       .no_real_shift = _norealshift,                                          \
+       .size = {.bits = _sizebits,},                                           \
+       .name = #_type "_" #_cname "_" #_iname,                                 \
+};                                                                             \
+static inline u64                                                              \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index)     \
+{                                                                              \
+       return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname),     \
+                                 index);                                       \
+}                                                                              \
+static inline void                                                             \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,     \
+                                         u64 val)                              \
+{                                                                              \
+       __mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname),            \
+                          index, val);                                         \
+}
+
+#define MLXSW_ITEM_BUF(_type, _cname, _iname, _offset, _sizebytes)             \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {                        \
+       .offset = _offset,                                                      \
+       .size = {.bytes = _sizebytes,},                                         \
+       .name = #_type "_" #_cname "_" #_iname,                                 \
+};                                                                             \
+static inline void                                                             \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst)                \
+{                                                                              \
+       __mlxsw_item_memcpy_from(buf, dst, &__ITEM_NAME(_type, _cname, _iname));\
+}                                                                              \
+static inline void                                                             \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, char *src)          \
+{                                                                              \
+       __mlxsw_item_memcpy_to(buf, src, &__ITEM_NAME(_type, _cname, _iname));  \
+}
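+
+/* Editor's note (hypothetical item): e.g.
+ *   MLXSW_ITEM_BUF(reg, example, mac, 0x10, 6);
+ * would generate mlxsw_reg_example_mac_memcpy_from(buf, dst) and
+ * mlxsw_reg_example_mac_memcpy_to(buf, src), copying the 6 bytes at
+ * payload offset 0x10.
+ */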
+
+#define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes,       \
+                            _element_size)                                     \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {                        \
+       .offset = _offset,                                                      \
+       .element_size = _element_size,                                          \
+       .size = {.bytes = _sizebytes,},                                         \
+       .name = #_type "_" #_cname "_" #_iname,                                 \
+};                                                                             \
+static inline u8                                                               \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, u16 index)                        \
+{                                                                              \
+       return __mlxsw_item_bit_array_get(buf,                                  \
+                                         &__ITEM_NAME(_type, _cname, _iname),  \
+                                         index);                               \
+}                                                                              \
+static inline void                                                             \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val)                \
+{                                                                              \
+       __mlxsw_item_bit_array_set(buf,                                         \
+                                  &__ITEM_NAME(_type, _cname, _iname),         \
+                                  index, val);                                 \
+}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
new file mode 100644 (file)
index 0000000..298ead5
--- /dev/null
@@ -0,0 +1,1794 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/pci.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/log2.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "pci.h"
+#include "core.h"
+#include "cmd.h"
+#include "port.h"
+
+static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
+
+static const struct pci_device_id mlxsw_pci_id_table[] = {
+       {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
+       {0, }
+};
+
+static struct dentry *mlxsw_pci_dbg_root;
+
+static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id)
+{
+       switch (id->device) {
+       case PCI_DEVICE_ID_MELLANOX_SWITCHX2:
+               return MLXSW_DEVICE_KIND_SWITCHX2;
+       default:
+               BUG();
+       }
+}
+
+#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
+       iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
+#define mlxsw_pci_read32(mlxsw_pci, reg) \
+       ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
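+
+/* Editor's note: the register name is token-pasted onto the MLXSW_PCI_
+ * prefix, so e.g. mlxsw_pci_read32(mlxsw_pci, CIR_CTRL) reads the
+ * big-endian word at hw_addr + MLXSW_PCI_CIR_CTRL (assuming pci.h
+ * defines such an offset macro).
+ */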
+
+enum mlxsw_pci_queue_type {
+       MLXSW_PCI_QUEUE_TYPE_SDQ,
+       MLXSW_PCI_QUEUE_TYPE_RDQ,
+       MLXSW_PCI_QUEUE_TYPE_CQ,
+       MLXSW_PCI_QUEUE_TYPE_EQ,
+};
+
+static const char *mlxsw_pci_queue_type_str(enum mlxsw_pci_queue_type q_type)
+{
+       switch (q_type) {
+       case MLXSW_PCI_QUEUE_TYPE_SDQ:
+               return "sdq";
+       case MLXSW_PCI_QUEUE_TYPE_RDQ:
+               return "rdq";
+       case MLXSW_PCI_QUEUE_TYPE_CQ:
+               return "cq";
+       case MLXSW_PCI_QUEUE_TYPE_EQ:
+               return "eq";
+       }
+       BUG();
+}
+
+#define MLXSW_PCI_QUEUE_TYPE_COUNT     4
+
+static const u16 mlxsw_pci_doorbell_type_offset[] = {
+       MLXSW_PCI_DOORBELL_SDQ_OFFSET,  /* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
+       MLXSW_PCI_DOORBELL_RDQ_OFFSET,  /* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
+       MLXSW_PCI_DOORBELL_CQ_OFFSET,   /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
+       MLXSW_PCI_DOORBELL_EQ_OFFSET,   /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
+};
+
+static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
+       0, /* unused */
+       0, /* unused */
+       MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
+       MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
+};
+
+struct mlxsw_pci_mem_item {
+       char *buf;
+       dma_addr_t mapaddr;
+       size_t size;
+};
+
+struct mlxsw_pci_queue_elem_info {
+       char *elem; /* pointer to the element's DMA-mapped memory chunk */
+       union {
+               struct {
+                       struct sk_buff *skb;
+               } sdq;
+               struct {
+                       struct sk_buff *skb;
+               } rdq;
+       } u;
+};
+
+struct mlxsw_pci_queue {
+       spinlock_t lock; /* for queue accesses */
+       struct mlxsw_pci_mem_item mem_item;
+       struct mlxsw_pci_queue_elem_info *elem_info;
+       u16 producer_counter;
+       u16 consumer_counter;
+       u16 count; /* number of elements in queue */
+       u8 num; /* queue number */
+       u8 elem_size; /* size of one element */
+       enum mlxsw_pci_queue_type type;
+       struct tasklet_struct tasklet; /* queue processing tasklet */
+       struct mlxsw_pci *pci;
+       union {
+               struct {
+                       u32 comp_sdq_count;
+                       u32 comp_rdq_count;
+               } cq;
+               struct {
+                       u32 ev_cmd_count;
+                       u32 ev_comp_count;
+                       u32 ev_other_count;
+               } eq;
+       } u;
+};
+
+struct mlxsw_pci_queue_type_group {
+       struct mlxsw_pci_queue *q;
+       u8 count; /* number of queues in group */
+};
+
+struct mlxsw_pci {
+       struct pci_dev *pdev;
+       u8 __iomem *hw_addr;
+       struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
+       u32 doorbell_offset;
+       struct msix_entry msix_entry;
+       struct mlxsw_core *core;
+       struct {
+               u16 num_pages;
+               struct mlxsw_pci_mem_item *items;
+       } fw_area;
+       struct {
+               struct mutex lock; /* Lock access to command registers */
+               bool nopoll;
+               wait_queue_head_t wait;
+               bool wait_done;
+               struct {
+                       u8 status;
+                       u64 out_param;
+               } comp;
+       } cmd;
+       struct mlxsw_bus_info bus_info;
+       struct dentry *dbg_dir;
+};
+
+static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
+{
+       tasklet_schedule(&q->tasklet);
+}
+
+static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
+                                       size_t elem_size, int elem_index)
+{
+       return q->mem_item.buf + (elem_size * elem_index);
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
+{
+       return &q->elem_info[elem_index];
+}
+
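+/* Editor's note: q->count is a power of two, so "& (q->count - 1)"
+ * maps the free-running producer counter onto a ring index; the ring
+ * is full exactly when the producer is a whole ring length ahead of
+ * the consumer, e.g. producer 300, consumer 44, count 256 -> NULL.
+ */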
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
+{
+       int index = q->producer_counter & (q->count - 1);
+
+       if ((q->producer_counter - q->consumer_counter) == q->count)
+               return NULL;
+       return mlxsw_pci_queue_elem_info_get(q, index);
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
+{
+       int index = q->consumer_counter & (q->count - 1);
+
+       return mlxsw_pci_queue_elem_info_get(q, index);
+}
+
+static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
+{
+       return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
+}
+
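+/* Editor's note on the owner-bit convention: (consumer_counter & count)
+ * toggles once per full pass through the ring. E.g. with count = 8 and
+ * consumer_counter = 9, the expected software-owned value is 1, so an
+ * element whose owner bit is still 0 has not been handed back by HW.
+ */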
+static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
+{
+       return owner_bit != !!(q->consumer_counter & q->count);
+}
+
+static char *mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
+                                        u32 (*get_elem_owner_func)(char *))
+{
+       struct mlxsw_pci_queue_elem_info *elem_info;
+       char *elem;
+       bool owner_bit;
+
+       elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+       elem = elem_info->elem;
+       owner_bit = get_elem_owner_func(elem);
+       if (mlxsw_pci_elem_hw_owned(q, owner_bit))
+               return NULL;
+       q->consumer_counter++;
+       rmb(); /* make sure we read the owner bit before the rest of the elem */
+       return elem;
+}
+
+static struct mlxsw_pci_queue_type_group *
+mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
+                              enum mlxsw_pci_queue_type q_type)
+{
+       return &mlxsw_pci->queues[q_type];
+}
+
+static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
+                                 enum mlxsw_pci_queue_type q_type)
+{
+       struct mlxsw_pci_queue_type_group *queue_group;
+
+       queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
+       return queue_group->count;
+}
+
+static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
+{
+       return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
+}
+
+static u8 mlxsw_pci_rdq_count(struct mlxsw_pci *mlxsw_pci)
+{
+       return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_RDQ);
+}
+
+static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
+{
+       return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
+}
+
+static u8 mlxsw_pci_eq_count(struct mlxsw_pci *mlxsw_pci)
+{
+       return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ);
+}
+
+static struct mlxsw_pci_queue *
+__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
+                     enum mlxsw_pci_queue_type q_type, u8 q_num)
+{
+       return &mlxsw_pci->queues[q_type].q[q_num];
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
+                                                u8 q_num)
+{
+       return __mlxsw_pci_queue_get(mlxsw_pci,
+                                    MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
+                                                u8 q_num)
+{
+       return __mlxsw_pci_queue_get(mlxsw_pci,
+                                    MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
+                                               u8 q_num)
+{
+       return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
+                                               u8 q_num)
+{
+       return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
+}
+
+static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
+                                          struct mlxsw_pci_queue *q,
+                                          u16 val)
+{
+       mlxsw_pci_write32(mlxsw_pci,
+                         DOORBELL(mlxsw_pci->doorbell_offset,
+                                  mlxsw_pci_doorbell_type_offset[q->type],
+                                  q->num), val);
+}
+
+static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
+                                              struct mlxsw_pci_queue *q,
+                                              u16 val)
+{
+       mlxsw_pci_write32(mlxsw_pci,
+                         DOORBELL(mlxsw_pci->doorbell_offset,
+                                  mlxsw_pci_doorbell_arm_type_offset[q->type],
+                                  q->num), val);
+}
+
+static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
+                                                  struct mlxsw_pci_queue *q)
+{
+       wmb(); /* ensure all writes are done before we ring a bell */
+       __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
+}
+
+static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
+                                                  struct mlxsw_pci_queue *q)
+{
+       wmb(); /* ensure all writes are done before we ring a bell */
+       __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
+                                      q->consumer_counter + q->count);
+}
+
+static void
+mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
+                                          struct mlxsw_pci_queue *q)
+{
+       wmb(); /* ensure all writes are done before we ring a bell */
+       __mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
+}
+
+static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
+                                            int page_index)
+{
+       return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
+}
+
+static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+                             struct mlxsw_pci_queue *q)
+{
+       int i;
+       int err;
+
+       q->producer_counter = 0;
+       q->consumer_counter = 0;
+
+       /* Set the CQ to the one with the same number as this SDQ. */
+       mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
+       mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 7);
+       mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
+       for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+               dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+               mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
+       }
+
+       err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
+       if (err)
+               return err;
+       mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+       return 0;
+}
+
+static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
+                              struct mlxsw_pci_queue *q)
+{
+       mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_sdq_dbg_read(struct seq_file *file, void *data)
+{
+       struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+       struct mlxsw_pci_queue *q;
+       int i;
+       static const char hdr[] =
+               "NUM PROD_COUNT CONS_COUNT COUNT\n";
+
+       seq_puts(file, hdr);
+       for (i = 0; i < mlxsw_pci_sdq_count(mlxsw_pci); i++) {
+               q = mlxsw_pci_sdq_get(mlxsw_pci, i);
+               spin_lock_bh(&q->lock);
+               seq_printf(file, "%3d %10d %10d %5d\n",
+                          i, q->producer_counter, q->consumer_counter,
+                          q->count);
+               spin_unlock_bh(&q->lock);
+       }
+       return 0;
+}
+
+static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
+                                 int index, char *frag_data, size_t frag_len,
+                                 int direction)
+{
+       struct pci_dev *pdev = mlxsw_pci->pdev;
+       dma_addr_t mapaddr;
+
+       mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
+       if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
+               if (net_ratelimit())
+                       dev_err(&pdev->dev, "failed to dma map tx frag\n");
+               return -EIO;
+       }
+       mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
+       mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
+       return 0;
+}
+
+static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
+                                    int index, int direction)
+{
+       struct pci_dev *pdev = mlxsw_pci->pdev;
+       size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
+       dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);
+
+       if (!frag_len)
+               return;
+       pci_unmap_single(pdev, mapaddr, frag_len, direction);
+}
+
+static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
+                                  struct mlxsw_pci_queue_elem_info *elem_info)
+{
+       size_t buf_len = MLXSW_PORT_MAX_MTU;
+       char *wqe = elem_info->elem;
+       struct sk_buff *skb;
+       int err;
+
+       elem_info->u.rdq.skb = NULL;
+       skb = netdev_alloc_skb_ip_align(NULL, buf_len);
+       if (!skb)
+               return -ENOMEM;
+
+       /* Assume that wqe was previously zeroed. */
+
+       err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+                                    buf_len, DMA_FROM_DEVICE);
+       if (err)
+               goto err_frag_map;
+
+       elem_info->u.rdq.skb = skb;
+       return 0;
+
+err_frag_map:
+       dev_kfree_skb_any(skb);
+       return err;
+}
+
+static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
+                                  struct mlxsw_pci_queue_elem_info *elem_info)
+{
+       struct sk_buff *skb;
+       char *wqe;
+
+       skb = elem_info->u.rdq.skb;
+       wqe = elem_info->elem;
+
+       mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+       dev_kfree_skb_any(skb);
+}
+
+static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+                             struct mlxsw_pci_queue *q)
+{
+       struct mlxsw_pci_queue_elem_info *elem_info;
+       int i;
+       int err;
+
+       q->producer_counter = 0;
+       q->consumer_counter = 0;
+
+       /* Set the CQ to the one with the same number as this RDQ, offset
+        * by MLXSW_PCI_SDQS_COUNT, as the lower-numbered CQs are assigned
+        * to the SDQs.
+        */
+       mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num + MLXSW_PCI_SDQS_COUNT);
+       mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
+       for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+               dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+               mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
+       }
+
+       err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
+       if (err)
+               return err;
+
+       mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+
+       for (i = 0; i < q->count; i++) {
+               elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
+               BUG_ON(!elem_info);
+               err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+               if (err)
+                       goto rollback;
+               /* Everything is set up, ring doorbell to pass elem to HW */
+               q->producer_counter++;
+               mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+       }
+
+       return 0;
+
+rollback:
+       for (i--; i >= 0; i--) {
+               elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+               mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+       }
+       mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
+
+       return err;
+}
+
+static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
+                              struct mlxsw_pci_queue *q)
+{
+       struct mlxsw_pci_queue_elem_info *elem_info;
+       int i;
+
+       mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
+       for (i = 0; i < q->count; i++) {
+               elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+               mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+       }
+}
+
+static int mlxsw_pci_rdq_dbg_read(struct seq_file *file, void *data)
+{
+       struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+       struct mlxsw_pci_queue *q;
+       int i;
+       static const char hdr[] =
+               "NUM PROD_COUNT CONS_COUNT COUNT\n";
+
+       seq_puts(file, hdr);
+       for (i = 0; i < mlxsw_pci_rdq_count(mlxsw_pci); i++) {
+               q = mlxsw_pci_rdq_get(mlxsw_pci, i);
+               spin_lock_bh(&q->lock);
+               seq_printf(file, "%3d %10d %10d %5d\n",
+                          i, q->producer_counter, q->consumer_counter,
+                          q->count);
+               spin_unlock_bh(&q->lock);
+       }
+       return 0;
+}
+
+static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+                            struct mlxsw_pci_queue *q)
+{
+       int i;
+       int err;
+
+       q->consumer_counter = 0;
+
+       for (i = 0; i < q->count; i++) {
+               char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+               mlxsw_pci_cqe_owner_set(elem, 1);
+       }
+
+       mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
+       mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
+       mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0);
+       mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
+       mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
+       for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+               dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+               mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
+       }
+       err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
+       if (err)
+               return err;
+       mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+       mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+       return 0;
+}
+
+static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
+                             struct mlxsw_pci_queue *q)
+{
+       mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_cq_dbg_read(struct seq_file *file, void *data)
+{
+       struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+       struct mlxsw_pci_queue *q;
+       int i;
+       static const char hdr[] =
+               "NUM CONS_INDEX  SDQ_COUNT  RDQ_COUNT COUNT\n";
+
+       seq_puts(file, hdr);
+       for (i = 0; i < mlxsw_pci_cq_count(mlxsw_pci); i++) {
+               q = mlxsw_pci_cq_get(mlxsw_pci, i);
+               spin_lock_bh(&q->lock);
+               seq_printf(file, "%3d %10d %10d %10d %5d\n",
+                          i, q->consumer_counter, q->u.cq.comp_sdq_count,
+                          q->u.cq.comp_rdq_count, q->count);
+               spin_unlock_bh(&q->lock);
+       }
+       return 0;
+}
+
+static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
+                                    struct mlxsw_pci_queue *q,
+                                    u16 consumer_counter_limit,
+                                    char *cqe)
+{
+       struct pci_dev *pdev = mlxsw_pci->pdev;
+       struct mlxsw_pci_queue_elem_info *elem_info;
+       char *wqe;
+       struct sk_buff *skb;
+       int i;
+
+       spin_lock(&q->lock);
+       elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+       skb = elem_info->u.sdq.skb;
+       wqe = elem_info->elem;
+       for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
+               mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
+       dev_kfree_skb_any(skb);
+       elem_info->u.sdq.skb = NULL;
+
+       if (q->consumer_counter++ != consumer_counter_limit)
+               dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
+       spin_unlock(&q->lock);
+}
+
+static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
+                                    struct mlxsw_pci_queue *q,
+                                    u16 consumer_counter_limit,
+                                    char *cqe)
+{
+       struct pci_dev *pdev = mlxsw_pci->pdev;
+       struct mlxsw_pci_queue_elem_info *elem_info;
+       char *wqe;
+       struct sk_buff *skb;
+       struct mlxsw_rx_info rx_info;
+       int err;
+
+       elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+       skb = elem_info->u.rdq.skb;
+       if (!skb)
+               return;
+       wqe = elem_info->elem;
+       mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+
+       if (q->consumer_counter++ != consumer_counter_limit)
+               dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
+
+       /* We do not support LAG yet */
+       if (mlxsw_pci_cqe_lag_get(cqe))
+               goto drop;
+
+       rx_info.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
+       rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
+
+       skb_put(skb, mlxsw_pci_cqe_byte_count_get(cqe));
+       mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
+
+put_new_skb:
+       memset(wqe, 0, q->elem_size);
+       err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+       if (err && net_ratelimit())
+               dev_dbg(&pdev->dev, "Failed to alloc skb for RDQ\n");
+       /* Everything is set up, ring doorbell to pass elem to HW */
+       q->producer_counter++;
+       mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+       return;
+
+drop:
+       dev_kfree_skb_any(skb);
+       goto put_new_skb;
+}
+
+static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
+{
+       return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get);
+}
+
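+/* Editor's note: one tasklet run consumes at most half the ring
+ * (credits = q->count >> 1), bounding the work done per softirq pass
+ * before the consumer doorbell is rung and the CQ is re-armed.
+ */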
+static void mlxsw_pci_cq_tasklet(unsigned long data)
+{
+       struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+       struct mlxsw_pci *mlxsw_pci = q->pci;
+       char *cqe;
+       int items = 0;
+       int credits = q->count >> 1;
+
+       while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
+               u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
+               u8 sendq = mlxsw_pci_cqe_sr_get(cqe);
+               u8 dqn = mlxsw_pci_cqe_dqn_get(cqe);
+
+               if (sendq) {
+                       struct mlxsw_pci_queue *sdq;
+
+                       sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
+                       mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
+                                                wqe_counter, cqe);
+                       q->u.cq.comp_sdq_count++;
+               } else {
+                       struct mlxsw_pci_queue *rdq;
+
+                       rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
+                       mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
+                                                wqe_counter, cqe);
+                       q->u.cq.comp_rdq_count++;
+               }
+               if (++items == credits)
+                       break;
+       }
+       if (items) {
+               mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+               mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+       }
+}
+
+static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+                            struct mlxsw_pci_queue *q)
+{
+       int i;
+       int err;
+
+       q->consumer_counter = 0;
+
+       for (i = 0; i < q->count; i++) {
+               char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+               mlxsw_pci_eqe_owner_set(elem, 1);
+       }
+
+       mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
+       mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0);
+       mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
+       mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
+       for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+               dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+               mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
+       }
+       err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
+       if (err)
+               return err;
+       mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+       mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+       return 0;
+}
+
+static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
+                             struct mlxsw_pci_queue *q)
+{
+       mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_eq_dbg_read(struct seq_file *file, void *data)
+{
+       struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+       struct mlxsw_pci_queue *q;
+       int i;
+       static const char hdr[] =
+               "NUM CONS_COUNT     EV_CMD    EV_COMP   EV_OTHER COUNT\n";
+
+       seq_puts(file, hdr);
+       for (i = 0; i < mlxsw_pci_eq_count(mlxsw_pci); i++) {
+               q = mlxsw_pci_eq_get(mlxsw_pci, i);
+               spin_lock_bh(&q->lock);
+               seq_printf(file, "%3d %10d %10d %10d %10d %5d\n",
+                          i, q->consumer_counter, q->u.eq.ev_cmd_count,
+                          q->u.eq.ev_comp_count, q->u.eq.ev_other_count,
+                          q->count);
+               spin_unlock_bh(&q->lock);
+       }
+       return 0;
+}
+
+static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
+{
+       mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
+       mlxsw_pci->cmd.comp.out_param =
+               ((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
+               mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
+       mlxsw_pci->cmd.wait_done = true;
+       wake_up(&mlxsw_pci->cmd.wait);
+}
+
+static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
+{
+       return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get);
+}
+
+static void mlxsw_pci_eq_tasklet(unsigned long data)
+{
+       struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+       struct mlxsw_pci *mlxsw_pci = q->pci;
+       unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT)];
+       char *eqe;
+       u8 cqn;
+       bool cq_handle = false;
+       int items = 0;
+       int credits = q->count >> 1;
+
+       memset(&active_cqns, 0, sizeof(active_cqns));
+
+       while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
+               u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);
+
+               switch (event_type) {
+               case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
+                       mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
+                       q->u.eq.ev_cmd_count++;
+                       break;
+               case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
+                       cqn = mlxsw_pci_eqe_cqn_get(eqe);
+                       set_bit(cqn, active_cqns);
+                       cq_handle = true;
+                       q->u.eq.ev_comp_count++;
+                       break;
+               default:
+                       q->u.eq.ev_other_count++;
+               }
+               if (++items == credits)
+                       break;
+       }
+       if (items) {
+               mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+               mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+       }
+
+       if (!cq_handle)
+               return;
+       for_each_set_bit(cqn, active_cqns, MLXSW_PCI_CQS_COUNT) {
+               q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
+               mlxsw_pci_queue_tasklet_schedule(q);
+       }
+}
+
+struct mlxsw_pci_queue_ops {
+       const char *name;
+       enum mlxsw_pci_queue_type type;
+       int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
+                   struct mlxsw_pci_queue *q);
+       void (*fini)(struct mlxsw_pci *mlxsw_pci,
+                    struct mlxsw_pci_queue *q);
+       void (*tasklet)(unsigned long data);
+       int (*dbg_read)(struct seq_file *s, void *data);
+       u16 elem_count;
+       u8 elem_size;
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
+       .type           = MLXSW_PCI_QUEUE_TYPE_SDQ,
+       .init           = mlxsw_pci_sdq_init,
+       .fini           = mlxsw_pci_sdq_fini,
+       .dbg_read       = mlxsw_pci_sdq_dbg_read,
+       .elem_count     = MLXSW_PCI_WQE_COUNT,
+       .elem_size      = MLXSW_PCI_WQE_SIZE,
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
+       .type           = MLXSW_PCI_QUEUE_TYPE_RDQ,
+       .init           = mlxsw_pci_rdq_init,
+       .fini           = mlxsw_pci_rdq_fini,
+       .dbg_read       = mlxsw_pci_rdq_dbg_read,
+       .elem_count     = MLXSW_PCI_WQE_COUNT,
+       .elem_size      = MLXSW_PCI_WQE_SIZE
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
+       .type           = MLXSW_PCI_QUEUE_TYPE_CQ,
+       .init           = mlxsw_pci_cq_init,
+       .fini           = mlxsw_pci_cq_fini,
+       .tasklet        = mlxsw_pci_cq_tasklet,
+       .dbg_read       = mlxsw_pci_cq_dbg_read,
+       .elem_count     = MLXSW_PCI_CQE_COUNT,
+       .elem_size      = MLXSW_PCI_CQE_SIZE
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
+       .type           = MLXSW_PCI_QUEUE_TYPE_EQ,
+       .init           = mlxsw_pci_eq_init,
+       .fini           = mlxsw_pci_eq_fini,
+       .tasklet        = mlxsw_pci_eq_tasklet,
+       .dbg_read       = mlxsw_pci_eq_dbg_read,
+       .elem_count     = MLXSW_PCI_EQE_COUNT,
+       .elem_size      = MLXSW_PCI_EQE_SIZE
+};
+
+static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+                               const struct mlxsw_pci_queue_ops *q_ops,
+                               struct mlxsw_pci_queue *q, u8 q_num)
+{
+       struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
+       int i;
+       int err;
+
+       spin_lock_init(&q->lock);
+       q->num = q_num;
+       q->count = q_ops->elem_count;
+       q->elem_size = q_ops->elem_size;
+       q->type = q_ops->type;
+       q->pci = mlxsw_pci;
+
+       if (q_ops->tasklet)
+               tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);
+
+       mem_item->size = MLXSW_PCI_AQ_SIZE;
+       mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
+                                            mem_item->size,
+                                            &mem_item->mapaddr);
+       if (!mem_item->buf)
+               return -ENOMEM;
+       memset(mem_item->buf, 0, mem_item->size);
+
+       q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
+       if (!q->elem_info) {
+               err = -ENOMEM;
+               goto err_elem_info_alloc;
+       }
+
+       /* Initialize the per-element info array with pointers into the
+        * DMA-mapped element memory, for easy access later.
+        */
+       for (i = 0; i < q->count; i++) {
+               struct mlxsw_pci_queue_elem_info *elem_info;
+
+               elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+               elem_info->elem =
+                       __mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i);
+       }
+
+       mlxsw_cmd_mbox_zero(mbox);
+       err = q_ops->init(mlxsw_pci, mbox, q);
+       if (err)
+               goto err_q_ops_init;
+       return 0;
+
+err_q_ops_init:
+       kfree(q->elem_info);
+err_elem_info_alloc:
+       pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+                           mem_item->buf, mem_item->mapaddr);
+       return err;
+}
+
+static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
+                                const struct mlxsw_pci_queue_ops *q_ops,
+                                struct mlxsw_pci_queue *q)
+{
+       struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
+
+       q_ops->fini(mlxsw_pci, q);
+       kfree(q->elem_info);
+       pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+                           mem_item->buf, mem_item->mapaddr);
+}
+
+static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+                                     const struct mlxsw_pci_queue_ops *q_ops,
+                                     u8 num_qs)
+{
+       struct pci_dev *pdev = mlxsw_pci->pdev;
+       struct mlxsw_pci_queue_type_group *queue_group;
+       char tmp[16];
+       int i;
+       int err;
+
+       queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
+       queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
+       if (!queue_group->q)
+               return -ENOMEM;
+
+       for (i = 0; i < num_qs; i++) {
+               err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
+                                          &queue_group->q[i], i);
+               if (err)
+                       goto err_queue_init;
+       }
+       queue_group->count = num_qs;
+
+       sprintf(tmp, "%s_stats", mlxsw_pci_queue_type_str(q_ops->type));
+       debugfs_create_devm_seqfile(&pdev->dev, tmp, mlxsw_pci->dbg_dir,
+                                   q_ops->dbg_read);
+
+       return 0;
+
+err_queue_init:
+       for (i--; i >= 0; i--)
+               mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
+       kfree(queue_group->q);
+       return err;
+}
+
+static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
+                                      const struct mlxsw_pci_queue_ops *q_ops)
+{
+       struct mlxsw_pci_queue_type_group *queue_group;
+       int i;
+
+       queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
+       for (i = 0; i < queue_group->count; i++)
+               mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
+       kfree(queue_group->q);
+}
+
+static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
+{
+       struct pci_dev *pdev = mlxsw_pci->pdev;
+       u8 num_sdqs;
+       u8 sdq_log2sz;
+       u8 num_rdqs;
+       u8 rdq_log2sz;
+       u8 num_cqs;
+       u8 cq_log2sz;
+       u8 num_eqs;
+       u8 eq_log2sz;
+       int err;
+
+       mlxsw_cmd_mbox_zero(mbox);
+       err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
+       if (err)
+               return err;
+
+       num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
+       sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
+       num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
+       rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
+       num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
+       cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
+       num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
+       eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
+
+       if ((num_sdqs != MLXSW_PCI_SDQS_COUNT) ||
+           (num_rdqs != MLXSW_PCI_RDQS_COUNT) ||
+           (num_cqs != MLXSW_PCI_CQS_COUNT) ||
+           (num_eqs != MLXSW_PCI_EQS_COUNT)) {
+               dev_err(&pdev->dev, "Unsupported number of queues\n");
+               return -EINVAL;
+       }
+
+       if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
+           (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
+           (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) ||
+           (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
+               dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
+               return -EINVAL;
+       }
+
+       err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
+                                        num_eqs);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to initialize event queues\n");
+               return err;
+       }
+
+       err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
+                                        num_cqs);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to initialize completion queues\n");
+               goto err_cqs_init;
+       }
+
+       err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
+                                        num_sdqs);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
+               goto err_sdqs_init;
+       }
+
+       err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
+                                        num_rdqs);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
+               goto err_rdqs_init;
+       }
+
+       /* Until now the command interface had to be polled; with the event
+        * queues up, command completions can be delivered by interrupt.
+        */
+       mlxsw_pci->cmd.nopoll = true;
+       return 0;
+
+err_rdqs_init:
+       mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
+err_sdqs_init:
+       mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
+err_cqs_init:
+       mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
+       return err;
+}
+
+static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
+{
+       mlxsw_pci->cmd.nopoll = false;
+       mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
+       mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
+       mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
+       mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
+}
+
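+/* Editor's note: the per-SWID mask tells firmware which fields of this
+ * swid_config entry are valid; bit 0 covers type, bit 1 properties, so
+ * an entry setting both is sent with mask 0x3.
+ */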
+static void
+mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
+                                    char *mbox, int index,
+                                    const struct mlxsw_swid_config *swid)
+{
+       u8 mask = 0;
+
+       if (swid->used_type) {
+               mlxsw_cmd_mbox_config_profile_swid_config_type_set(
+                       mbox, index, swid->type);
+               mask |= 1;
+       }
+       if (swid->used_properties) {
+               mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
+                       mbox, index, swid->properties);
+               mask |= 2;
+       }
+       mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
+}
+
+static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
+                                   const struct mlxsw_config_profile *profile)
+{
+       int i;
+
+       mlxsw_cmd_mbox_zero(mbox);
+
+       if (profile->used_max_vepa_channels) {
+               mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
+                       mbox, profile->max_vepa_channels);
+       }
+       if (profile->used_max_lag) {
+               mlxsw_cmd_mbox_config_profile_set_max_lag_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_max_lag_set(
+                       mbox, profile->max_lag);
+       }
+       if (profile->used_max_port_per_lag) {
+               mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(
+                       mbox, profile->max_port_per_lag);
+       }
+       if (profile->used_max_mid) {
+               mlxsw_cmd_mbox_config_profile_set_max_mid_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_max_mid_set(
+                       mbox, profile->max_mid);
+       }
+       if (profile->used_max_pgt) {
+               mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_max_pgt_set(
+                       mbox, profile->max_pgt);
+       }
+       if (profile->used_max_system_port) {
+               mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_max_system_port_set(
+                       mbox, profile->max_system_port);
+       }
+       if (profile->used_max_vlan_groups) {
+               mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
+                       mbox, profile->max_vlan_groups);
+       }
+       if (profile->used_max_regions) {
+               mlxsw_cmd_mbox_config_profile_set_max_regions_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_max_regions_set(
+                       mbox, profile->max_regions);
+       }
+       if (profile->used_flood_tables) {
+               mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
+                       mbox, profile->max_flood_tables);
+               mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
+                       mbox, profile->max_vid_flood_tables);
+       }
+       if (profile->used_flood_mode) {
+               mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_flood_mode_set(
+                       mbox, profile->flood_mode);
+       }
+       if (profile->used_max_ib_mc) {
+               mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
+                       mbox, profile->max_ib_mc);
+       }
+       if (profile->used_max_pkey) {
+               mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_max_pkey_set(
+                       mbox, profile->max_pkey);
+       }
+       if (profile->used_ar_sec) {
+               mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_ar_sec_set(
+                       mbox, profile->ar_sec);
+       }
+       if (profile->used_adaptive_routing_group_cap) {
+               mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
+                       mbox, profile->adaptive_routing_group_cap);
+       }
+
+       for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
+               mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
+                                                    &profile->swid_config[i]);
+
+       return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
+}
+
+static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
+{
+       struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
+       int err;
+
+       mlxsw_cmd_mbox_zero(mbox);
+       err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
+       if (err)
+               return err;
+       mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
+       mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
+       return 0;
+}
+
+static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+                                 u16 num_pages)
+{
+       struct mlxsw_pci_mem_item *mem_item;
+       int i;
+       int err;
+
+       mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
+                                          GFP_KERNEL);
+       if (!mlxsw_pci->fw_area.items)
+               return -ENOMEM;
+       mlxsw_pci->fw_area.num_pages = num_pages;
+
+       mlxsw_cmd_mbox_zero(mbox);
+       for (i = 0; i < num_pages; i++) {
+               mem_item = &mlxsw_pci->fw_area.items[i];
+
+               mem_item->size = MLXSW_PCI_PAGE_SIZE;
+               mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
+                                                    mem_item->size,
+                                                    &mem_item->mapaddr);
+               if (!mem_item->buf) {
+                       err = -ENOMEM;
+                       goto err_alloc;
+               }
+               mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, mem_item->mapaddr);
+               mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0); /* 1 page */
+       }
+
+       err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, num_pages);
+       if (err)
+               goto err_cmd_map_fa;
+
+       return 0;
+
+err_cmd_map_fa:
+err_alloc:
+       for (i--; i >= 0; i--) {
+               mem_item = &mlxsw_pci->fw_area.items[i];
+
+               pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+                                   mem_item->buf, mem_item->mapaddr);
+       }
+       kfree(mlxsw_pci->fw_area.items);
+       return err;
+}
+
+static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
+{
+       struct mlxsw_pci_mem_item *mem_item;
+       int i;
+
+       mlxsw_cmd_unmap_fa(mlxsw_pci->core);
+
+       for (i = 0; i < mlxsw_pci->fw_area.num_pages; i++) {
+               mem_item = &mlxsw_pci->fw_area.items[i];
+
+               pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+                                   mem_item->buf, mem_item->mapaddr);
+       }
+       kfree(mlxsw_pci->fw_area.items);
+}
+
+static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
+{
+       struct mlxsw_pci *mlxsw_pci = dev_id;
+       struct mlxsw_pci_queue *q;
+       int i;
+
+       for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
+               q = mlxsw_pci_eq_get(mlxsw_pci, i);
+               mlxsw_pci_queue_tasklet_schedule(q);
+       }
+       return IRQ_HANDLED;
+}
+
+static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
+                         const struct mlxsw_config_profile *profile)
+{
+       struct mlxsw_pci *mlxsw_pci = bus_priv;
+       struct pci_dev *pdev = mlxsw_pci->pdev;
+       char *mbox;
+       u16 num_pages;
+       int err;
+
+       mutex_init(&mlxsw_pci->cmd.lock);
+       init_waitqueue_head(&mlxsw_pci->cmd.wait);
+
+       mlxsw_pci->core = mlxsw_core;
+
+       mbox = mlxsw_cmd_mbox_alloc();
+       if (!mbox)
+               return -ENOMEM;
+       err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
+       if (err)
+               goto err_query_fw;
+
+       mlxsw_pci->bus_info.fw_rev.major =
+               mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
+       mlxsw_pci->bus_info.fw_rev.minor =
+               mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
+       mlxsw_pci->bus_info.fw_rev.subminor =
+               mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);
+
+       if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
+               dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
+               err = -EINVAL;
+               goto err_iface_rev;
+       }
+       if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
+               dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
+               err = -EINVAL;
+               goto err_doorbell_page_bar;
+       }
+
+       mlxsw_pci->doorbell_offset =
+               mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);
+
+       num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
+       err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
+       if (err)
+               goto err_fw_area_init;
+
+       err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
+       if (err)
+               goto err_boardinfo;
+
+       err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
+       if (err)
+               goto err_config_profile;
+
+       err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
+       if (err)
+               goto err_aqs_init;
+
+       err = request_irq(mlxsw_pci->msix_entry.vector,
+                         mlxsw_pci_eq_irq_handler, 0,
+                         mlxsw_pci_driver_name, mlxsw_pci);
+       if (err) {
+               dev_err(&pdev->dev, "IRQ request failed\n");
+               goto err_request_eq_irq;
+       }
+
+       goto mbox_put;
+
+err_request_eq_irq:
+       mlxsw_pci_aqs_fini(mlxsw_pci);
+err_aqs_init:
+err_config_profile:
+err_boardinfo:
+       mlxsw_pci_fw_area_fini(mlxsw_pci);
+err_fw_area_init:
+err_doorbell_page_bar:
+err_iface_rev:
+err_query_fw:
+mbox_put:
+       mlxsw_cmd_mbox_free(mbox);
+       return err;
+}
+
+static void mlxsw_pci_fini(void *bus_priv)
+{
+       struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+       free_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci);
+       mlxsw_pci_aqs_fini(mlxsw_pci);
+       mlxsw_pci_fw_area_fini(mlxsw_pci);
+}
+
+static struct mlxsw_pci_queue *
+mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
+                  const struct mlxsw_tx_info *tx_info)
+{
+       u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci);
+
+       return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
+}
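+
+/* Example: with MLXSW_PCI_SDQS_COUNT == 24, the modulo above spreads
+ * local ports evenly across the send queues:
+ *
+ *     local_port  1 -> SDQ  1
+ *     local_port 23 -> SDQ 23
+ *     local_port 24 -> SDQ  0
+ *     local_port 25 -> SDQ  1
+ *
+ * (Illustrative mapping only; any scheme that consistently maps a port
+ * to one SDQ would do, this one simply balances the queues.)
+ */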
+
+static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
+                                 const struct mlxsw_tx_info *tx_info)
+{
+       struct mlxsw_pci *mlxsw_pci = bus_priv;
+       struct mlxsw_pci_queue *q;
+       struct mlxsw_pci_queue_elem_info *elem_info;
+       char *wqe;
+       int i;
+       int err;
+
+       if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
+               err = skb_linearize(skb);
+               if (err)
+                       return err;
+       }
+
+       q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+       spin_lock_bh(&q->lock);
+       elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
+       if (!elem_info) {
+               /* queue is full */
+               err = -EAGAIN;
+               goto unlock;
+       }
+       elem_info->u.sdq.skb = skb;
+
+       wqe = elem_info->elem;
+       mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
+       mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
+       mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);
+
+       err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+                                    skb_headlen(skb), DMA_TO_DEVICE);
+       if (err)
+               goto unlock;
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+               err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
+                                            skb_frag_address(frag),
+                                            skb_frag_size(frag),
+                                            DMA_TO_DEVICE);
+               if (err)
+                       goto unmap_frags;
+       }
+
+       /* Set the byte count of unused SQ entries to zero. */
+       for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
+               mlxsw_pci_wqe_byte_count_set(wqe, i, 0);
+
+       /* Everything is set up, ring producer doorbell to get HW going */
+       q->producer_counter++;
+       mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+
+       goto unlock;
+
+unmap_frags:
+       for (; i >= 0; i--)
+               mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
+unlock:
+       spin_unlock_bh(&q->lock);
+       return err;
+}
+
+static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
+                             u32 in_mod, bool out_mbox_direct,
+                             char *in_mbox, size_t in_mbox_size,
+                             char *out_mbox, size_t out_mbox_size,
+                             u8 *p_status)
+{
+       struct mlxsw_pci *mlxsw_pci = bus_priv;
+       dma_addr_t in_mapaddr = 0;
+       dma_addr_t out_mapaddr = 0;
+       bool evreq = mlxsw_pci->cmd.nopoll;
+       unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
+       bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
+       int err;
+
+       *p_status = MLXSW_CMD_STATUS_OK;
+
+       err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
+       if (err)
+               return err;
+
+       if (in_mbox) {
+               in_mapaddr = pci_map_single(mlxsw_pci->pdev, in_mbox,
+                                           in_mbox_size, PCI_DMA_TODEVICE);
+               if (unlikely(pci_dma_mapping_error(mlxsw_pci->pdev,
+                                                  in_mapaddr))) {
+                       err = -EIO;
+                       goto err_in_mbox_map;
+               }
+       }
+       mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, in_mapaddr >> 32);
+       mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, in_mapaddr);
+
+       if (out_mbox) {
+               out_mapaddr = pci_map_single(mlxsw_pci->pdev, out_mbox,
+                                            out_mbox_size, PCI_DMA_FROMDEVICE);
+               if (unlikely(pci_dma_mapping_error(mlxsw_pci->pdev,
+                                                  out_mapaddr))) {
+                       err = -EIO;
+                       goto err_out_mbox_map;
+               }
+       }
+       mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, out_mapaddr >> 32);
+       mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, out_mapaddr);
+
+       mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
+       mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);
+
+       *p_wait_done = false;
+
+       wmb(); /* all needs to be written before we write control register */
+       mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
+                         MLXSW_PCI_CIR_CTRL_GO_BIT |
+                         (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
+                         (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
+                         opcode);
+
+       if (!evreq) {
+               unsigned long end;
+
+               end = jiffies + timeout;
+               do {
+                       u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
+
+                       if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
+                               *p_wait_done = true;
+                               *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
+                               break;
+                       }
+                       cond_resched();
+               } while (time_before(jiffies, end));
+       } else {
+               wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
+               *p_status = mlxsw_pci->cmd.comp.status;
+       }
+
+       err = 0;
+       if (*p_wait_done) {
+               if (*p_status)
+                       err = -EIO;
+       } else {
+               err = -ETIMEDOUT;
+       }
+
+       if (!err && out_mbox && out_mbox_direct) {
+               /* Some commands do not use the output param as an address
+                * to a mailbox, but instead store output directly into
+                * registers. In that case, copy the registers into the
+                * mbox buffer.
+                */
+               __be32 tmp;
+
+               if (!evreq) {
+                       tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+                                                          CIR_OUT_PARAM_HI));
+                       memcpy(out_mbox, &tmp, sizeof(tmp));
+                       tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+                                                          CIR_OUT_PARAM_LO));
+                       memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
+               }
+       }
+
+       if (out_mapaddr)
+               pci_unmap_single(mlxsw_pci->pdev, out_mapaddr, out_mbox_size,
+                                PCI_DMA_FROMDEVICE);
+
+       /* fall through */
+
+err_out_mbox_map:
+       if (in_mapaddr)
+               pci_unmap_single(mlxsw_pci->pdev, in_mapaddr, in_mbox_size,
+                                PCI_DMA_TODEVICE);
+err_in_mbox_map:
+       mutex_unlock(&mlxsw_pci->cmd.lock);
+
+       return err;
+}
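+
+/* Usage sketch (hypothetical caller; real callers are expected to go
+ * through the mlxsw_cmd_* wrappers built on top of cmd_exec, and
+ * MLXSW_CMD_MBOX_SIZE is assumed from cmd.h):
+ *
+ *     u8 status;
+ *     int err;
+ *
+ *     err = mlxsw_pci_cmd_exec(mlxsw_pci, opcode, 0, 0, false,
+ *                              in_mbox, MLXSW_CMD_MBOX_SIZE,
+ *                              out_mbox, MLXSW_CMD_MBOX_SIZE, &status);
+ *
+ * -ETIMEDOUT means the GO bit never cleared (polling mode) or no
+ * completion event arrived within MLXSW_PCI_CIR_TIMEOUT_MSECS; -EIO
+ * together with a non-zero status means the firmware rejected the
+ * command.
+ */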
+
+static const struct mlxsw_bus mlxsw_pci_bus = {
+       .kind           = "pci",
+       .init           = mlxsw_pci_init,
+       .fini           = mlxsw_pci_fini,
+       .skb_transmit   = mlxsw_pci_skb_transmit,
+       .cmd_exec       = mlxsw_pci_cmd_exec,
+};
+
+static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)
+{
+       mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
+       /* Current firmware does not let us know when the reset is done,
+        * so we just wait for a fixed amount of time and hope for the best.
+        */
+       msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+       return 0;
+}
+
+static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct mlxsw_pci *mlxsw_pci;
+       int err;
+
+       mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
+       if (!mlxsw_pci)
+               return -ENOMEM;
+
+       err = pci_enable_device(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "pci_enable_device failed\n");
+               goto err_pci_enable_device;
+       }
+
+       err = pci_request_regions(pdev, mlxsw_pci_driver_name);
+       if (err) {
+               dev_err(&pdev->dev, "pci_request_regions failed\n");
+               goto err_pci_request_regions;
+       }
+
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (!err) {
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+               if (err) {
+                       dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
+                       goto err_pci_set_dma_mask;
+               }
+       } else {
+               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (err) {
+                       dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
+                       goto err_pci_set_dma_mask;
+               }
+       }
+
+       if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
+               dev_err(&pdev->dev, "invalid PCI region size\n");
+               err = -EINVAL;
+               goto err_pci_resource_len_check;
+       }
+
+       mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
+                                    pci_resource_len(pdev, 0));
+       if (!mlxsw_pci->hw_addr) {
+               dev_err(&pdev->dev, "ioremap failed\n");
+               err = -EIO;
+               goto err_ioremap;
+       }
+       pci_set_master(pdev);
+
+       mlxsw_pci->pdev = pdev;
+       pci_set_drvdata(pdev, mlxsw_pci);
+
+       err = mlxsw_pci_sw_reset(mlxsw_pci);
+       if (err) {
+               dev_err(&pdev->dev, "Software reset failed\n");
+               goto err_sw_reset;
+       }
+
+       err = pci_enable_msix_exact(pdev, &mlxsw_pci->msix_entry, 1);
+       if (err) {
+               dev_err(&pdev->dev, "MSI-X init failed\n");
+               goto err_msix_init;
+       }
+
+       mlxsw_pci->bus_info.device_kind = mlxsw_pci_device_kind_get(id);
+       mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
+       mlxsw_pci->bus_info.dev = &pdev->dev;
+
+       mlxsw_pci->dbg_dir = debugfs_create_dir(mlxsw_pci->bus_info.device_name,
+                                               mlxsw_pci_dbg_root);
+       if (!mlxsw_pci->dbg_dir) {
+               dev_err(&pdev->dev, "Failed to create debugfs dir\n");
+               err = -ENOMEM;
+               goto err_dbg_create_dir;
+       }
+
+       err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
+                                            &mlxsw_pci_bus, mlxsw_pci);
+       if (err) {
+               dev_err(&pdev->dev, "cannot register bus device\n");
+               goto err_bus_device_register;
+       }
+
+       return 0;
+
+err_bus_device_register:
+       debugfs_remove_recursive(mlxsw_pci->dbg_dir);
+err_dbg_create_dir:
+       pci_disable_msix(mlxsw_pci->pdev);
+err_msix_init:
+err_sw_reset:
+       iounmap(mlxsw_pci->hw_addr);
+err_ioremap:
+err_pci_resource_len_check:
+err_pci_set_dma_mask:
+       pci_release_regions(pdev);
+err_pci_request_regions:
+       pci_disable_device(pdev);
+err_pci_enable_device:
+       kfree(mlxsw_pci);
+       return err;
+}
+
+static void mlxsw_pci_remove(struct pci_dev *pdev)
+{
+       struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
+
+       mlxsw_core_bus_device_unregister(mlxsw_pci->core);
+       debugfs_remove_recursive(mlxsw_pci->dbg_dir);
+       pci_disable_msix(mlxsw_pci->pdev);
+       iounmap(mlxsw_pci->hw_addr);
+       pci_release_regions(mlxsw_pci->pdev);
+       pci_disable_device(mlxsw_pci->pdev);
+       kfree(mlxsw_pci);
+}
+
+static struct pci_driver mlxsw_pci_driver = {
+       .name           = mlxsw_pci_driver_name,
+       .id_table       = mlxsw_pci_id_table,
+       .probe          = mlxsw_pci_probe,
+       .remove         = mlxsw_pci_remove,
+};
+
+static int __init mlxsw_pci_module_init(void)
+{
+       int err;
+
+       mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL);
+       if (!mlxsw_pci_dbg_root)
+               return -ENOMEM;
+       err = pci_register_driver(&mlxsw_pci_driver);
+       if (err)
+               goto err_register_driver;
+       return 0;
+
+err_register_driver:
+       debugfs_remove_recursive(mlxsw_pci_dbg_root);
+       return err;
+}
+
+static void __exit mlxsw_pci_module_exit(void)
+{
+       pci_unregister_driver(&mlxsw_pci_driver);
+       debugfs_remove_recursive(mlxsw_pci_dbg_root);
+}
+
+module_init(mlxsw_pci_module_init);
+module_exit(mlxsw_pci_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch PCI interface driver");
+MODULE_DEVICE_TABLE(pci, mlxsw_pci_id_table);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
new file mode 100644 (file)
index 0000000..887af84
--- /dev/null
@@ -0,0 +1,221 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/pci.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_PCI_H
+#define _MLXSW_PCI_H
+
+#include <linux/bitops.h>
+
+#include "item.h"
+
+#define PCI_DEVICE_ID_MELLANOX_SWITCHX2        0xc738
+#define MLXSW_PCI_BAR0_SIZE            (1024 * 1024) /* 1MB */
+#define MLXSW_PCI_PAGE_SIZE            4096
+
+#define MLXSW_PCI_CIR_BASE                     0x71000
+#define MLXSW_PCI_CIR_IN_PARAM_HI              MLXSW_PCI_CIR_BASE
+#define MLXSW_PCI_CIR_IN_PARAM_LO              (MLXSW_PCI_CIR_BASE + 0x04)
+#define MLXSW_PCI_CIR_IN_MODIFIER              (MLXSW_PCI_CIR_BASE + 0x08)
+#define MLXSW_PCI_CIR_OUT_PARAM_HI             (MLXSW_PCI_CIR_BASE + 0x0C)
+#define MLXSW_PCI_CIR_OUT_PARAM_LO             (MLXSW_PCI_CIR_BASE + 0x10)
+#define MLXSW_PCI_CIR_TOKEN                    (MLXSW_PCI_CIR_BASE + 0x14)
+#define MLXSW_PCI_CIR_CTRL                     (MLXSW_PCI_CIR_BASE + 0x18)
+#define MLXSW_PCI_CIR_CTRL_GO_BIT              BIT(23)
+#define MLXSW_PCI_CIR_CTRL_EVREQ_BIT           BIT(22)
+#define MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT    12
+#define MLXSW_PCI_CIR_CTRL_STATUS_SHIFT                24
+#define MLXSW_PCI_CIR_TIMEOUT_MSECS            1000
+
+#define MLXSW_PCI_SW_RESET                     0xF0010
+#define MLXSW_PCI_SW_RESET_RST_BIT             BIT(0)
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS       5000
+
+#define MLXSW_PCI_DOORBELL_SDQ_OFFSET          0x000
+#define MLXSW_PCI_DOORBELL_RDQ_OFFSET          0x200
+#define MLXSW_PCI_DOORBELL_CQ_OFFSET           0x400
+#define MLXSW_PCI_DOORBELL_EQ_OFFSET           0x600
+#define MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET       0x800
+#define MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET       0xA00
+
+#define MLXSW_PCI_DOORBELL(offset, type_offset, num)   \
+       ((offset) + (type_offset) + (num) * 4)
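+
+/* For example, the doorbell register of SDQ number 3 lives at
+ * doorbell_offset + MLXSW_PCI_DOORBELL_SDQ_OFFSET + 3 * 4, and the arm
+ * doorbell of CQ number 5 at
+ * doorbell_offset + MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET + 5 * 4, where
+ * doorbell_offset is the value reported by QUERY_FW.
+ */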
+
+#define MLXSW_PCI_RDQS_COUNT   24
+#define MLXSW_PCI_SDQS_COUNT   24
+#define MLXSW_PCI_CQS_COUNT    (MLXSW_PCI_RDQS_COUNT + MLXSW_PCI_SDQS_COUNT)
+#define MLXSW_PCI_EQS_COUNT    2
+#define MLXSW_PCI_EQ_ASYNC_NUM 0
+#define MLXSW_PCI_EQ_COMP_NUM  1
+
+#define MLXSW_PCI_AQ_PAGES     8
+#define MLXSW_PCI_AQ_SIZE      (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES)
+#define MLXSW_PCI_WQE_SIZE     32 /* 32 bytes per element */
+#define MLXSW_PCI_CQE_SIZE     16 /* 16 bytes per element */
+#define MLXSW_PCI_EQE_SIZE     16 /* 16 bytes per element */
+#define MLXSW_PCI_WQE_COUNT    (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
+#define MLXSW_PCI_CQE_COUNT    (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE)
+#define MLXSW_PCI_EQE_COUNT    (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE)
+#define MLXSW_PCI_EQE_UPDATE_COUNT     0x80
+
+#define MLXSW_PCI_WQE_SG_ENTRIES       3
+#define MLXSW_PCI_WQE_TYPE_ETHERNET    0xA
+
+/* pci_wqe_c
+ * If set, it indicates that a completion should be reported upon
+ * execution of this descriptor.
+ */
+MLXSW_ITEM32(pci, wqe, c, 0x00, 31, 1);
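+
+/* Each MLXSW_ITEM32() invocation in this file expands (via item.h) into
+ * an inline getter/setter pair that operates on the raw element buffer;
+ * the item above, for instance, is behind the mlxsw_pci_wqe_c_set(wqe, 1)
+ * call used when posting a WQE in mlxsw_pci_skb_transmit().
+ */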
+
+/* pci_wqe_lp
+ * Local Processing, set if packet should be processed by the local
+ * switch hardware:
+ * For Ethernet EMAD (Direct Route and non Direct Route) -
+ * must be set if packet destination is local device
+ * For InfiniBand CTL - must be set if packet destination is local device
+ * Otherwise it must be clear
+ * Local Process packets must not exceed the size of 2K (including payload
+ * and headers).
+ */
+MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1);
+
+/* pci_wqe_type
+ * Packet type.
+ */
+MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4);
+
+/* pci_wqe_byte_count
+ * Size of i-th scatter/gather entry, 0 if entry is unused.
+ */
+MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false);
+
+/* pci_wqe_address
+ * Physical address of i-th scatter/gather entry.
+ * Gather entries must be 2-byte aligned.
+ */
+MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);
+
+/* pci_cqe_lag
+ * Packet arrives from a port which is a LAG
+ */
+MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
+
+/* pci_cqe_system_port
+ * When lag=0: System port on which the packet was received
+ * When lag=1:
+ * bits [15:4] LAG ID on which the packet was received
+ * bits [3:0] sub_port on which the packet was received
+ */
+MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
+
+/* pci_cqe_wqe_counter
+ * WQE count of the WQEs completed on the associated dqn
+ */
+MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
+
+/* pci_cqe_byte_count
+ * Byte count of received packets, including the additional two reserved
+ * bytes that are appended to the end of the frame.
+ * Reserved for Send CQE.
+ */
+MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
+
+/* pci_cqe_trap_id
+ * Trap ID that captured the packet.
+ */
+MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 8);
+
+/* pci_cqe_e
+ * CQE with Error.
+ */
+MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1);
+
+/* pci_cqe_sr
+ * 1 - Send Queue
+ * 0 - Receive Queue
+ */
+MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1);
+
+/* pci_cqe_dqn
+ * Descriptor Queue (DQ) Number.
+ */
+MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5);
+
+/* pci_cqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_event_type
+ * Event type.
+ */
+MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8);
+#define MLXSW_PCI_EQE_EVENT_TYPE_COMP  0x00
+#define MLXSW_PCI_EQE_EVENT_TYPE_CMD   0x0A
+
+/* pci_eqe_event_sub_type
+ * Event sub-type.
+ */
+MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8);
+
+/* pci_eqe_cqn
+ * Completion Queue that triggered this EQE.
+ */
+MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7);
+
+/* pci_eqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_cmd_token
+ * Command completion event - token
+ */
+MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
+
+/* pci_eqe_cmd_status
+ * Command completion event - status
+ */
+MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
+
+/* pci_eqe_cmd_out_param_h
+ * Command completion event - output parameter - higher part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
+
+/* pci_eqe_cmd_out_param_l
+ * Command completion event - output parameter - lower part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
new file mode 100644 (file)
index 0000000..726f543
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/port.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXSW_PORT_H
+#define _MLXSW_PORT_H
+
+#include <linux/types.h>
+
+#define MLXSW_PORT_MAX_MTU             10000
+
+#define MLXSW_PORT_DEFAULT_VID         1
+
+#define MLXSW_PORT_SWID_DISABLED_PORT  255
+#define MLXSW_PORT_SWID_ALL_SWIDS      254
+#define MLXSW_PORT_SWID_TYPE_ETH       2
+
+#define MLXSW_PORT_MID                 0xd000
+
+#define MLXSW_PORT_MAX_PHY_PORTS       0x40
+#define MLXSW_PORT_MAX_PORTS           MLXSW_PORT_MAX_PHY_PORTS
+
+#define MLXSW_PORT_DEVID_BITS_OFFSET   10
+#define MLXSW_PORT_PHY_BITS_OFFSET     4
+#define MLXSW_PORT_PHY_BITS_MASK       (MLXSW_PORT_MAX_PHY_PORTS - 1)
+
+#define MLXSW_PORT_CPU_PORT            0x0
+
+#define MLXSW_PORT_DONT_CARE           (MLXSW_PORT_MAX_PORTS)
+
+enum mlxsw_port_admin_status {
+       MLXSW_PORT_ADMIN_STATUS_UP = 1,
+       MLXSW_PORT_ADMIN_STATUS_DOWN = 2,
+       MLXSW_PORT_ADMIN_STATUS_UP_ONCE = 3,
+       MLXSW_PORT_ADMIN_STATUS_DISABLED = 4,
+};
+
+enum mlxsw_reg_pude_oper_status {
+       MLXSW_PORT_OPER_STATUS_UP = 1,
+       MLXSW_PORT_OPER_STATUS_DOWN = 2,
+       MLXSW_PORT_OPER_STATUS_FAILURE = 4,     /* Can be set to up again. */
+};
+
+#endif /* _MLXSW_PORT_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
new file mode 100644 (file)
index 0000000..b5a72f8
--- /dev/null
@@ -0,0 +1,1289 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/reg.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_REG_H
+#define _MLXSW_REG_H
+
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+
+#include "item.h"
+#include "port.h"
+
+struct mlxsw_reg_info {
+       u16 id;
+       u16 len; /* In u8 */
+};
+
+#define MLXSW_REG(type) (&mlxsw_reg_##type)
+#define MLXSW_REG_LEN(type) MLXSW_REG(type)->len
+#define MLXSW_REG_ZERO(type, payload) memset(payload, 0, MLXSW_REG(type)->len)
+
+/* SGCR - Switch General Configuration Register
+ * --------------------------------------------
+ * This register is used for configuration of the switch capabilities.
+ */
+#define MLXSW_REG_SGCR_ID 0x2000
+#define MLXSW_REG_SGCR_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sgcr = {
+       .id = MLXSW_REG_SGCR_ID,
+       .len = MLXSW_REG_SGCR_LEN,
+};
+
+/* reg_sgcr_llb
+ * Link Local Broadcast (Default=0)
+ * When set, all Link Local packets (224.0.0.X) will be treated as broadcast
+ * packets, ignoring the IGMP snooping entries.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sgcr, llb, 0x04, 0, 1);
+
+static inline void mlxsw_reg_sgcr_pack(char *payload, bool llb)
+{
+       MLXSW_REG_ZERO(sgcr, payload);
+       mlxsw_reg_sgcr_llb_set(payload, !!llb);
+}
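+
+/* Usage sketch: register payloads are built on the stack and handed to
+ * the core for emission (mlxsw_reg_write() is assumed here, as declared
+ * in core.h):
+ *
+ *     char sgcr_pl[MLXSW_REG_SGCR_LEN];
+ *
+ *     mlxsw_reg_sgcr_pack(sgcr_pl, true);
+ *     err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(sgcr), sgcr_pl);
+ */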
+
+/* SPAD - Switch Physical Address Register
+ * ---------------------------------------
+ * The SPAD register configures the switch physical MAC address.
+ */
+#define MLXSW_REG_SPAD_ID 0x2002
+#define MLXSW_REG_SPAD_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_spad = {
+       .id = MLXSW_REG_SPAD_ID,
+       .len = MLXSW_REG_SPAD_LEN,
+};
+
+/* reg_spad_base_mac
+ * Base MAC address for the switch partitions.
+ * Per switch partition MAC address is equal to:
+ * base_mac + swid
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
+
+/* SMID - Switch Multicast ID
+ * --------------------------
+ * In a multi-chip configuration, each device should maintain the mapping
+ * between a Multicast ID (MID) and a list of local ports. This mapping is
+ * used in all the devices other than the ingress device, and is implemented
+ * as part of the FDB. The MID record maps from a MID, which is a unique
+ * identifier of the multicast group within the stacking domain, into a list
+ * of local ports into which the packet is replicated.
+ */
+#define MLXSW_REG_SMID_ID 0x2007
+#define MLXSW_REG_SMID_LEN 0x420
+
+static const struct mlxsw_reg_info mlxsw_reg_smid = {
+       .id = MLXSW_REG_SMID_ID,
+       .len = MLXSW_REG_SMID_LEN,
+};
+
+/* reg_smid_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8);
+
+/* reg_smid_mid
+ * Multicast identifier - global identifier that represents the multicast group
+ * across all devices
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16);
+
+/* reg_smid_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1);
+
+/* reg_smid_port_mask
+ * Local port mask (1 bit per port).
+ * Access: W
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1);
+
+static inline void mlxsw_reg_smid_pack(char *payload, u16 mid)
+{
+       MLXSW_REG_ZERO(smid, payload);
+       mlxsw_reg_smid_swid_set(payload, 0);
+       mlxsw_reg_smid_mid_set(payload, mid);
+       mlxsw_reg_smid_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
+       mlxsw_reg_smid_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
+}
+
+/* SPMS - Switch Port MSTP/RSTP State Register
+ * -------------------------------------------
+ * Configures the spanning tree state of a physical port.
+ */
+#define MLXSW_REG_SPMS_ID 0x200d
+#define MLXSW_REG_SPMS_LEN 0x404
+
+static const struct mlxsw_reg_info mlxsw_reg_spms = {
+       .id = MLXSW_REG_SPMS_ID,
+       .len = MLXSW_REG_SPMS_LEN,
+};
+
+/* reg_spms_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spms, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_spms_state {
+       MLXSW_REG_SPMS_STATE_NO_CHANGE,
+       MLXSW_REG_SPMS_STATE_DISCARDING,
+       MLXSW_REG_SPMS_STATE_LEARNING,
+       MLXSW_REG_SPMS_STATE_FORWARDING,
+};
+
+/* reg_spms_state
+ * Spanning tree state of each VLAN ID (VID) of the local port.
+ * 0 - Do not change spanning tree state (used only when writing).
+ * 1 - Discarding. No learning or forwarding to/from this port (default).
+ * 2 - Learning. Port is learning, but not forwarding.
+ * 3 - Forwarding. Port is learning and forwarding.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2);
+
+static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port, u16 vid,
+                                      enum mlxsw_reg_spms_state state)
+{
+       MLXSW_REG_ZERO(spms, payload);
+       mlxsw_reg_spms_local_port_set(payload, local_port);
+       mlxsw_reg_spms_state_set(payload, vid, state);
+}
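+
+/* Since the state field is a per-VID bit array, one payload can carry
+ * updates for several VLANs; a hypothetical caller could pack one VID
+ * and set more before writing (mlxsw_reg_write() assumed from core.h):
+ *
+ *     char spms_pl[MLXSW_REG_SPMS_LEN];
+ *
+ *     mlxsw_reg_spms_pack(spms_pl, local_port, 1,
+ *                         MLXSW_REG_SPMS_STATE_FORWARDING);
+ *     mlxsw_reg_spms_state_set(spms_pl, 2,
+ *                              MLXSW_REG_SPMS_STATE_DISCARDING);
+ *     err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(spms), spms_pl);
+ */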
+
+/* SFGC - Switch Flooding Group Configuration
+ * ------------------------------------------
+ * The following register controls the association of flooding tables and MIDs
+ * to packet types used for flooding.
+ */
+#define MLXSW_REG_SFGC_ID  0x2011
+#define MLXSW_REG_SFGC_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sfgc = {
+       .id = MLXSW_REG_SFGC_ID,
+       .len = MLXSW_REG_SFGC_LEN,
+};
+
+enum mlxsw_reg_sfgc_type {
+       MLXSW_REG_SFGC_TYPE_BROADCAST = 0,
+       MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST = 1,
+       MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4 = 2,
+       MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6 = 3,
+       MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP = 5,
+       MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL = 6,
+       MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST = 7,
+};
+
+/* reg_sfgc_type
+ * The traffic type to reach the flooding table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfgc, type, 0x00, 0, 4);
+
+enum mlxsw_reg_sfgc_bridge_type {
+       MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID = 0,
+       MLXSW_REG_SFGC_BRIDGE_TYPE_VFID = 1,
+};
+
+/* reg_sfgc_bridge_type
+ * Access: Index
+ *
+ * Note: SwitchX-2 only supports 802.1Q mode.
+ */
+MLXSW_ITEM32(reg, sfgc, bridge_type, 0x04, 24, 3);
+
+enum mlxsw_flood_table_type {
+       MLXSW_REG_SFGC_TABLE_TYPE_VID = 1,
+       MLXSW_REG_SFGC_TABLE_TYPE_SINGLE = 2,
+       MLXSW_REG_SFGC_TABLE_TYPE_ANY = 0,
+       MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST = 3,
+       MLXSW_REG_SFGC_TABLE_TYPE_FID = 4,
+};
+
+/* reg_sfgc_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ *
+ * Note: FID offset and FID types are not supported in SwitchX-2.
+ */
+MLXSW_ITEM32(reg, sfgc, table_type, 0x04, 16, 3);
+
+/* reg_sfgc_flood_table
+ * Flooding table index to associate with the specific type on the specific
+ * switch partition.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, flood_table, 0x04, 0, 6);
+
+/* reg_sfgc_mid
+ * The multicast ID for the swid. Not supported for Spectrum
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, mid, 0x08, 0, 16);
+
+/* reg_sfgc_counter_set_type
+ * Counter Set Type for flow counters.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, counter_set_type, 0x0C, 24, 8);
+
+/* reg_sfgc_counter_index
+ * Counter Index for flow counters.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, counter_index, 0x0C, 0, 24);
+
+static inline void
+mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type,
+                   enum mlxsw_reg_sfgc_bridge_type bridge_type,
+                   enum mlxsw_flood_table_type table_type,
+                   unsigned int flood_table)
+{
+       MLXSW_REG_ZERO(sfgc, payload);
+       mlxsw_reg_sfgc_type_set(payload, type);
+       mlxsw_reg_sfgc_bridge_type_set(payload, bridge_type);
+       mlxsw_reg_sfgc_table_type_set(payload, table_type);
+       mlxsw_reg_sfgc_flood_table_set(payload, flood_table);
+       mlxsw_reg_sfgc_mid_set(payload, MLXSW_PORT_MID);
+}
+
+/* SFTR - Switch Flooding Table Register
+ * -------------------------------------
+ * The switch flooding table is used for flooding packet replication. The table
+ * defines a bit mask of ports for packet replication.
+ */
+#define MLXSW_REG_SFTR_ID 0x2012
+#define MLXSW_REG_SFTR_LEN 0x420
+
+static const struct mlxsw_reg_info mlxsw_reg_sftr = {
+       .id = MLXSW_REG_SFTR_ID,
+       .len = MLXSW_REG_SFTR_LEN,
+};
+
+/* reg_sftr_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, swid, 0x00, 24, 8);
+
+/* reg_sftr_flood_table
+ * Flooding table index to associate with the specific type on the specific
+ * switch partition.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, flood_table, 0x00, 16, 6);
+
+/* reg_sftr_index
+ * Index. Used as an index into the Flooding Table in case the table is
+ * configured to use VID / FID or FID Offset.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, index, 0x00, 0, 16);
+
+/* reg_sftr_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sftr, table_type, 0x04, 16, 3);
+
+/* reg_sftr_range
+ * Range of entries to update
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, range, 0x04, 0, 16);
+
+/* reg_sftr_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr, port, 0x20, 0x20, 1);
+
+/* reg_sftr_port_mask
+ * Local port mask (1 bit per port).
+ * Access: W
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr, port_mask, 0x220, 0x20, 1);
+
+static inline void mlxsw_reg_sftr_pack(char *payload,
+                                      unsigned int flood_table,
+                                      unsigned int index,
+                                      enum mlxsw_flood_table_type table_type,
+                                      unsigned int range)
+{
+       MLXSW_REG_ZERO(sftr, payload);
+       mlxsw_reg_sftr_swid_set(payload, 0);
+       mlxsw_reg_sftr_flood_table_set(payload, flood_table);
+       mlxsw_reg_sftr_index_set(payload, index);
+       mlxsw_reg_sftr_table_type_set(payload, table_type);
+       mlxsw_reg_sftr_range_set(payload, range);
+       mlxsw_reg_sftr_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
+       mlxsw_reg_sftr_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
+}
+
+/* SPMLR - Switch Port MAC Learning Register
+ * -----------------------------------------
+ * Controls the Switch MAC learning policy per port.
+ */
+#define MLXSW_REG_SPMLR_ID 0x2018
+#define MLXSW_REG_SPMLR_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_spmlr = {
+       .id = MLXSW_REG_SPMLR_ID,
+       .len = MLXSW_REG_SPMLR_LEN,
+};
+
+/* reg_spmlr_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, local_port, 0x00, 16, 8);
+
+/* reg_spmlr_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, sub_port, 0x00, 8, 8);
+
+enum mlxsw_reg_spmlr_learn_mode {
+       MLXSW_REG_SPMLR_LEARN_MODE_DISABLE = 0,
+       MLXSW_REG_SPMLR_LEARN_MODE_ENABLE = 2,
+       MLXSW_REG_SPMLR_LEARN_MODE_SEC = 3,
+};
+
+/* reg_spmlr_learn_mode
+ * Learning mode on the port.
+ * 0 - Learning disabled.
+ * 2 - Learning enabled.
+ * 3 - Security mode.
+ *
+ * In security mode the switch does not learn MACs on the port, but uses the
+ * SMAC to see if it exists on another ingress port. If so, the packet is
+ * classified as a bad packet and is discarded unless the software registers
+ * to receive port security error packets using HPKT.
+ */
+MLXSW_ITEM32(reg, spmlr, learn_mode, 0x04, 30, 2);
+
+static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port,
+                                       enum mlxsw_reg_spmlr_learn_mode mode)
+{
+       MLXSW_REG_ZERO(spmlr, payload);
+       mlxsw_reg_spmlr_local_port_set(payload, local_port);
+       mlxsw_reg_spmlr_sub_port_set(payload, 0);
+       mlxsw_reg_spmlr_learn_mode_set(payload, mode);
+}
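+
+/* Usage sketch: putting a port into the security mode described above
+ * (mlxsw_reg_write() assumed from core.h):
+ *
+ *     char spmlr_pl[MLXSW_REG_SPMLR_LEN];
+ *
+ *     mlxsw_reg_spmlr_pack(spmlr_pl, local_port,
+ *                          MLXSW_REG_SPMLR_LEARN_MODE_SEC);
+ *     err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(spmlr), spmlr_pl);
+ */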
+
+/* PMLP - Ports Module to Local Port Register
+ * ------------------------------------------
+ * Configures the assignment of modules to local ports.
+ */
+#define MLXSW_REG_PMLP_ID 0x5002
+#define MLXSW_REG_PMLP_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_pmlp = {
+       .id = MLXSW_REG_PMLP_ID,
+       .len = MLXSW_REG_PMLP_LEN,
+};
+
+/* reg_pmlp_rxtx
+ * 0 - Tx value is used for both Tx and Rx.
+ * 1 - Rx value is taken from a separate field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, rxtx, 0x00, 31, 1);
+
+/* reg_pmlp_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmlp, local_port, 0x00, 16, 8);
+
+/* reg_pmlp_width
+ * 0 - Unmap local port.
+ * 1 - Lane 0 is used.
+ * 2 - Lanes 0 and 1 are used.
+ * 4 - Lanes 0, 1, 2 and 3 are used.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
+
+/* reg_pmlp_module
+ * Module number.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0, false);
+
+/* reg_pmlp_tx_lane
+ * Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 16, false);
+
+/* reg_pmlp_rx_lane
+ * Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
+ * equal to Tx lane.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 24, false);
+
+static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
+{
+       MLXSW_REG_ZERO(pmlp, payload);
+       mlxsw_reg_pmlp_local_port_set(payload, local_port);
+}
+
+/* PMTU - Port MTU Register
+ * ------------------------
+ * Configures and reports the port MTU.
+ */
+#define MLXSW_REG_PMTU_ID 0x5003
+#define MLXSW_REG_PMTU_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_pmtu = {
+       .id = MLXSW_REG_PMTU_ID,
+       .len = MLXSW_REG_PMTU_LEN,
+};
+
+/* reg_pmtu_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtu, local_port, 0x00, 16, 8);
+
+/* reg_pmtu_max_mtu
+ * Maximum MTU.
+ * When port type (e.g. Ethernet) is configured, the relevant MTU is
+ * reported, otherwise the minimum between the max_mtu of the different
+ * types is reported.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtu, max_mtu, 0x04, 16, 16);
+
+/* reg_pmtu_admin_mtu
+ * MTU value to set the port to. Must be smaller than or equal to max_mtu.
+ * Note: If the port type is InfiniBand, the port must be disabled when its
+ * MTU is set.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmtu, admin_mtu, 0x08, 16, 16);
+
+/* reg_pmtu_oper_mtu
+ * The actual MTU configured on the port. Packets exceeding this size
+ * will be dropped.
+ * Note: In Ethernet and FC, oper_mtu == admin_mtu; however, in InfiniBand,
+ * oper_mtu might be smaller than admin_mtu.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtu, oper_mtu, 0x0C, 16, 16);
+
+static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port,
+                                      u16 new_mtu)
+{
+       MLXSW_REG_ZERO(pmtu, payload);
+       mlxsw_reg_pmtu_local_port_set(payload, local_port);
+       mlxsw_reg_pmtu_max_mtu_set(payload, 0);
+       mlxsw_reg_pmtu_admin_mtu_set(payload, new_mtu);
+       mlxsw_reg_pmtu_oper_mtu_set(payload, 0);
+}
+
+/* PTYS - Port Type and Speed Register
+ * -----------------------------------
+ * Configures and reports the port speed type.
+ *
+ * Note: When set while the link is up, the changes will not take effect
+ * until the port transitions from down to up state.
+ */
+#define MLXSW_REG_PTYS_ID 0x5004
+#define MLXSW_REG_PTYS_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_ptys = {
+       .id = MLXSW_REG_PTYS_ID,
+       .len = MLXSW_REG_PTYS_LEN,
+};
+
+/* reg_ptys_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8);
+
+#define MLXSW_REG_PTYS_PROTO_MASK_ETH  BIT(2)
+
+/* reg_ptys_proto_mask
+ * Protocol mask. Indicates which protocol is used.
+ * 0 - Infiniband.
+ * 1 - Fibre Channel.
+ * 2 - Ethernet.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
+
+#define MLXSW_REG_PTYS_ETH_SPEED_SGMII                 BIT(0)
+#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX           BIT(1)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4           BIT(2)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4           BIT(3)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR            BIT(4)
+#define MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2           BIT(5)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4           BIT(6)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4           BIT(7)
+#define MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4            BIT(8)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR            BIT(12)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR            BIT(13)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR         BIT(14)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4           BIT(15)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4       BIT(16)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4           BIT(19)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4          BIT(20)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4          BIT(21)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4          BIT(22)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4      BIT(23)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX            BIT(24)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_T             BIT(25)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T             BIT(26)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR            BIT(27)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR            BIT(28)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR            BIT(29)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2           BIT(30)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2           BIT(31)
+
+/* reg_ptys_eth_proto_cap
+ * Ethernet port supported speeds and protocols.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_cap, 0x0C, 0, 32);
+
+/* reg_ptys_eth_proto_admin
+ * Speed and protocol to set port to.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32);
+
+/* reg_ptys_eth_proto_oper
+ * The current speed and protocol configured for the port.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
+
+static inline void mlxsw_reg_ptys_pack(char *payload, u8 local_port,
+                                      u32 proto_admin)
+{
+       MLXSW_REG_ZERO(ptys, payload);
+       mlxsw_reg_ptys_local_port_set(payload, local_port);
+       mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_ETH);
+       mlxsw_reg_ptys_eth_proto_admin_set(payload, proto_admin);
+}
+
+static inline void mlxsw_reg_ptys_unpack(char *payload, u32 *p_eth_proto_cap,
+                                        u32 *p_eth_proto_adm,
+                                        u32 *p_eth_proto_oper)
+{
+       if (p_eth_proto_cap)
+               *p_eth_proto_cap = mlxsw_reg_ptys_eth_proto_cap_get(payload);
+       if (p_eth_proto_adm)
+               *p_eth_proto_adm = mlxsw_reg_ptys_eth_proto_admin_get(payload);
+       if (p_eth_proto_oper)
+               *p_eth_proto_oper = mlxsw_reg_ptys_eth_proto_oper_get(payload);
+}
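+
+/* Usage sketch for querying port speeds: pack (a zero admin mask is fine
+ * for a pure query), execute the query, then unpack only the fields of
+ * interest, passing NULL for the rest (mlxsw_reg_query() assumed from
+ * core.h):
+ *
+ *     char ptys_pl[MLXSW_REG_PTYS_LEN];
+ *     u32 eth_proto_cap, eth_proto_oper;
+ *
+ *     mlxsw_reg_ptys_pack(ptys_pl, local_port, 0);
+ *     err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(ptys), ptys_pl);
+ *     if (!err)
+ *             mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, NULL,
+ *                                   &eth_proto_oper);
+ */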
+
+/* PPAD - Port Physical Address Register
+ * -------------------------------------
+ * The PPAD register configures the per port physical MAC address.
+ */
+#define MLXSW_REG_PPAD_ID 0x5005
+#define MLXSW_REG_PPAD_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_ppad = {
+       .id = MLXSW_REG_PPAD_ID,
+       .len = MLXSW_REG_PPAD_LEN,
+};
+
+/* reg_ppad_single_base_mac
+ * 0 - base_mac: local port should be 0 and mac[7:0] is reserved;
+ *     HW will set the MAC incrementally.
+ * 1 - single_mac: the MAC of the local_port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppad, single_base_mac, 0x00, 28, 1);
+
+/* reg_ppad_local_port
+ * Port number. If single_base_mac = 0, then local_port is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8);
+
+/* reg_ppad_mac
+ * If single_base_mac = 0 - base MAC address, mac[7:0] is reserved.
+ * If single_base_mac = 1 - the per port MAC address
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ppad, mac, 0x02, 6);
+
+static inline void mlxsw_reg_ppad_pack(char *payload, bool single_base_mac,
+                                      u8 local_port)
+{
+       MLXSW_REG_ZERO(ppad, payload);
+       mlxsw_reg_ppad_single_base_mac_set(payload, !!single_base_mac);
+       mlxsw_reg_ppad_local_port_set(payload, local_port);
+}
+
+/* PAOS - Ports Administrative and Operational Status Register
+ * -----------------------------------------------------------
+ * Configures and retrieves per port administrative and operational status.
+ */
+#define MLXSW_REG_PAOS_ID 0x5006
+#define MLXSW_REG_PAOS_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_paos = {
+       .id = MLXSW_REG_PAOS_ID,
+       .len = MLXSW_REG_PAOS_LEN,
+};
+
+/* reg_paos_swid
+ * Switch partition ID with which to associate the port.
+ * Note: while external ports use unique local port numbers (and thus swid is
+ * redundant), router ports use the same local port number where swid is the
+ * only indication for the relevant port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, swid, 0x00, 24, 8);
+
+/* reg_paos_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, local_port, 0x00, 16, 8);
+
+/* reg_paos_admin_status
+ * Port administrative state (the desired state of the port):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ *     into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, admin_status, 0x00, 8, 4);
+
+/* reg_paos_oper_status
+ * Port operational state (the current state):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not let the
+ *     port up again until explicitly specified by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, paos, oper_status, 0x00, 0, 4);
+
+/* reg_paos_ase
+ * Admin state update enabled.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ase, 0x04, 31, 1);
+
+/* reg_paos_ee
+ * Event update enable. If this bit is set, event generation will be
+ * updated based on the e field.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ee, 0x04, 30, 1);
+
+/* reg_paos_e
+ * Event generation on operational state change:
+ * 0 - Do not generate event.
+ * 1 - Generate Event.
+ * 2 - Generate Single Event.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, e, 0x04, 0, 2);
+
+static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port,
+                                      enum mlxsw_port_admin_status status)
+{
+       MLXSW_REG_ZERO(paos, payload);
+       mlxsw_reg_paos_swid_set(payload, 0);
+       mlxsw_reg_paos_local_port_set(payload, local_port);
+       mlxsw_reg_paos_admin_status_set(payload, status);
+       mlxsw_reg_paos_oper_status_set(payload, 0);
+       mlxsw_reg_paos_ase_set(payload, 1);
+       mlxsw_reg_paos_ee_set(payload, 1);
+       mlxsw_reg_paos_e_set(payload, 1);
+}
+
+/* PPCNT - Ports Performance Counters Register
+ * -------------------------------------------
+ * The PPCNT register retrieves per port performance counters.
+ */
+#define MLXSW_REG_PPCNT_ID 0x5008
+#define MLXSW_REG_PPCNT_LEN 0x100
+
+static const struct mlxsw_reg_info mlxsw_reg_ppcnt = {
+       .id = MLXSW_REG_PPCNT_ID,
+       .len = MLXSW_REG_PPCNT_LEN,
+};
+
+/* reg_ppcnt_swid
+ * For HCA: must always be 0.
+ * Switch partition ID to associate port with.
+ * Switch partitions are numbered from 0 to 7 inclusively.
+ * Switch partition 254 indicates stacking ports.
+ * Switch partition 255 indicates all switch partitions.
+ * Only valid on Set() operation with local_port=255.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, swid, 0x00, 24, 8);
+
+/* reg_ppcnt_local_port
+ * Local port number.
+ * 255 indicates all ports on the device, and is only allowed
+ * for Set() operation.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8);
+
+/* reg_ppcnt_pnat
+ * Port number access type:
+ * 0 - Local port number
+ * 1 - IB port number
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
+
+/* reg_ppcnt_grp
+ * Performance counter group.
+ * Group 63 indicates all groups. Only valid on Set() operation with
+ * clr bit set.
+ * 0x0: IEEE 802.3 Counters
+ * 0x1: RFC 2863 Counters
+ * 0x2: RFC 2819 Counters
+ * 0x3: RFC 3635 Counters
+ * 0x5: Ethernet Extended Counters
+ * 0x8: Link Level Retransmission Counters
+ * 0x10: Per Priority Counters
+ * 0x11: Per Traffic Class Counters
+ * 0x12: Physical Layer Counters
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6);
+
+/* reg_ppcnt_clr
+ * Clear counters. Setting the clr bit will reset the counter value
+ * for all counters in the counter group. This bit can be set
+ * for both Set() and Get() operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1);
+
+/* reg_ppcnt_prio_tc
+ * Priority, for counter sets that support per-priority counters;
+ * valid values: 0-7.
+ * Traffic class, for counter sets that support per-traffic-class counters;
+ * valid values: 0..cap_max_tclass-1.
+ * For HCA: cap_max_tclass is always 8.
+ * Otherwise must be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5);
+
+/* reg_ppcnt_a_frames_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_transmitted_ok,
+            0x08 + 0x00, 0, 64);
+
+/* reg_ppcnt_a_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_received_ok,
+            0x08 + 0x08, 0, 64);
+
+/* reg_ppcnt_a_frame_check_sequence_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_check_sequence_errors,
+            0x08 + 0x10, 0, 64);
+
+/* reg_ppcnt_a_alignment_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_alignment_errors,
+            0x08 + 0x18, 0, 64);
+
+/* reg_ppcnt_a_octets_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_transmitted_ok,
+            0x08 + 0x20, 0, 64);
+
+/* reg_ppcnt_a_octets_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_received_ok,
+            0x08 + 0x28, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_xmitted_ok,
+            0x08 + 0x30, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_xmitted_ok,
+            0x08 + 0x38, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_received_ok,
+            0x08 + 0x40, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_received_ok,
+            0x08 + 0x48, 0, 64);
+
+/* reg_ppcnt_a_in_range_length_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_in_range_length_errors,
+            0x08 + 0x50, 0, 64);
+
+/* reg_ppcnt_a_out_of_range_length_field
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_out_of_range_length_field,
+            0x08 + 0x58, 0, 64);
+
+/* reg_ppcnt_a_frame_too_long_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_too_long_errors,
+            0x08 + 0x60, 0, 64);
+
+/* reg_ppcnt_a_symbol_error_during_carrier
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_symbol_error_during_carrier,
+            0x08 + 0x68, 0, 64);
+
+/* reg_ppcnt_a_mac_control_frames_transmitted
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_transmitted,
+            0x08 + 0x70, 0, 64);
+
+/* reg_ppcnt_a_mac_control_frames_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_received,
+            0x08 + 0x78, 0, 64);
+
+/* reg_ppcnt_a_unsupported_opcodes_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_unsupported_opcodes_received,
+            0x08 + 0x80, 0, 64);
+
+/* reg_ppcnt_a_pause_mac_ctrl_frames_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
+            0x08 + 0x88, 0, 64);
+
+/* reg_ppcnt_a_pause_mac_ctrl_frames_transmitted
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
+            0x08 + 0x90, 0, 64);
+
+static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port)
+{
+       MLXSW_REG_ZERO(ppcnt, payload);
+       mlxsw_reg_ppcnt_swid_set(payload, 0);
+       mlxsw_reg_ppcnt_local_port_set(payload, local_port);
+       mlxsw_reg_ppcnt_pnat_set(payload, 0);
+       mlxsw_reg_ppcnt_grp_set(payload, 0);
+       mlxsw_reg_ppcnt_clr_set(payload, 0);
+       mlxsw_reg_ppcnt_prio_tc_set(payload, 0);
+}
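+
+/* Usage sketch: reading (without clearing) the IEEE 802.3 counter group
+ * of a port, then picking individual counters via the generated getters
+ * (mlxsw_reg_query() assumed from core.h):
+ *
+ *     char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ *     u64 tx_frames_ok;
+ *
+ *     mlxsw_reg_ppcnt_pack(ppcnt_pl, local_port);
+ *     err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(ppcnt), ppcnt_pl);
+ *     if (!err)
+ *             tx_frames_ok =
+ *                     mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
+ */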
+
+/* PSPA - Port Switch Partition Allocation
+ * ---------------------------------------
+ * Controls the association of a port with a switch partition and enables
+ * configuring ports as stacking ports.
+ */
+#define MLXSW_REG_PSPA_ID 0x500d
+#define MLXSW_REG_PSPA_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_pspa = {
+       .id = MLXSW_REG_PSPA_ID,
+       .len = MLXSW_REG_PSPA_LEN,
+};
+
+/* reg_pspa_swid
+ * Switch partition ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pspa, swid, 0x00, 24, 8);
+
+/* reg_pspa_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8);
+
+/* reg_pspa_sub_port
+ * Virtual port within the local port. Set to 0 when virtual ports are
+ * disabled on the local port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pspa, sub_port, 0x00, 8, 8);
+
+static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
+{
+       MLXSW_REG_ZERO(pspa, payload);
+       mlxsw_reg_pspa_swid_set(payload, swid);
+       mlxsw_reg_pspa_local_port_set(payload, local_port);
+       mlxsw_reg_pspa_sub_port_set(payload, 0);
+}
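+
+/* Illustrative sketch (not part of this patch): moving a port into switch
+ * partition 0 could be done along these lines (this is what
+ * mlxsw_sx_port_swid_set() in switchx2.c does):
+ *
+ *     char pspa_pl[MLXSW_REG_PSPA_LEN];
+ *
+ *     mlxsw_reg_pspa_pack(pspa_pl, 0, local_port);
+ *     err = mlxsw_reg_write(core, MLXSW_REG(pspa), pspa_pl);
+ */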
+
+/* HTGT - Host Trap Group Table
+ * ----------------------------
+ * Configures the properties for forwarding to CPU.
+ */
+#define MLXSW_REG_HTGT_ID 0x7002
+#define MLXSW_REG_HTGT_LEN 0x100
+
+static const struct mlxsw_reg_info mlxsw_reg_htgt = {
+       .id = MLXSW_REG_HTGT_ID,
+       .len = MLXSW_REG_HTGT_LEN,
+};
+
+/* reg_htgt_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, htgt, swid, 0x00, 24, 8);
+
+#define MLXSW_REG_HTGT_PATH_TYPE_LOCAL 0x0     /* For locally attached CPU */
+
+/* reg_htgt_type
+ * CPU path type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
+
+#define MLXSW_REG_HTGT_TRAP_GROUP_EMAD 0x0
+#define MLXSW_REG_HTGT_TRAP_GROUP_RX   0x1
+
+/* reg_htgt_trap_group
+ * Trap group number. User-defined number specifying which trap groups
+ * should be forwarded to the CPU. The mapping between trap IDs and trap
+ * groups is configured using the HPKT register.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, htgt, trap_group, 0x00, 0, 8);
+
+enum {
+       MLXSW_REG_HTGT_POLICER_DISABLE,
+       MLXSW_REG_HTGT_POLICER_ENABLE,
+};
+
+/* reg_htgt_pide
+ * Enable policer ID specified using 'pid' field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, pide, 0x04, 15, 1);
+
+/* reg_htgt_pid
+ * Policer ID for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, pid, 0x04, 0, 8);
+
+#define MLXSW_REG_HTGT_TRAP_TO_CPU 0x0
+
+/* reg_htgt_mirror_action
+ * Mirror action to use.
+ * 0 - Trap to CPU.
+ * 1 - Trap to CPU and mirror to a mirroring agent.
+ * 2 - Mirror to a mirroring agent and do not trap to CPU.
+ * Access: RW
+ *
+ * Note: Mirroring to a mirroring agent is only supported in Spectrum.
+ */
+MLXSW_ITEM32(reg, htgt, mirror_action, 0x08, 8, 2);
+
+/* reg_htgt_mirroring_agent
+ * Mirroring agent.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, mirroring_agent, 0x08, 0, 3);
+
+/* reg_htgt_priority
+ * Trap group priority.
+ * In case a packet matches multiple classification rules, the packet will
+ * only be trapped once, based on the trap ID associated with the group (via
+ * register HPKT) with the highest priority.
+ * Supported values are 0-7, with 7 representing the highest priority.
+ * Access: RW
+ *
+ * Note: In SwitchX-2 this field is ignored and the priority value is replaced
+ * by the 'trap_group' field.
+ */
+MLXSW_ITEM32(reg, htgt, priority, 0x0C, 0, 4);
+
+/* reg_htgt_local_path_cpu_tclass
+ * CPU ingress traffic class for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6);
+
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD     0x15
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX       0x14
+
+/* reg_htgt_local_path_rdq
+ * Receive descriptor queue (RDQ) to use for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6);
+
+static inline void mlxsw_reg_htgt_pack(char *payload, u8 trap_group)
+{
+       u8 swid, rdq;
+
+       MLXSW_REG_ZERO(htgt, payload);
+       if (trap_group == MLXSW_REG_HTGT_TRAP_GROUP_EMAD) {
+               swid = MLXSW_PORT_SWID_ALL_SWIDS;
+               rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD;
+       } else {
+               swid = 0;
+               rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX;
+       }
+       mlxsw_reg_htgt_swid_set(payload, swid);
+       mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL);
+       mlxsw_reg_htgt_trap_group_set(payload, trap_group);
+       mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE);
+       mlxsw_reg_htgt_pid_set(payload, 0);
+       mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU);
+       mlxsw_reg_htgt_mirroring_agent_set(payload, 0);
+       mlxsw_reg_htgt_priority_set(payload, 0);
+       mlxsw_reg_htgt_local_path_cpu_tclass_set(payload, 7);
+       mlxsw_reg_htgt_local_path_rdq_set(payload, rdq);
+}
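+
+/* Illustrative sketch (not part of this patch): before registering RX
+ * listeners, the RX trap group itself is configured like this (see
+ * mlxsw_sx_traps_init() in switchx2.c):
+ *
+ *     char htgt_pl[MLXSW_REG_HTGT_LEN];
+ *
+ *     mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
+ *     err = mlxsw_reg_write(core, MLXSW_REG(htgt), htgt_pl);
+ */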
+
+/* HPKT - Host Packet Trap
+ * -----------------------
+ * Configures trap IDs inside trap groups.
+ */
+#define MLXSW_REG_HPKT_ID 0x7003
+#define MLXSW_REG_HPKT_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_hpkt = {
+       .id = MLXSW_REG_HPKT_ID,
+       .len = MLXSW_REG_HPKT_LEN,
+};
+
+enum {
+       MLXSW_REG_HPKT_ACK_NOT_REQUIRED,
+       MLXSW_REG_HPKT_ACK_REQUIRED,
+};
+
+/* reg_hpkt_ack
+ * Require acknowledgements from the host for events.
+ * If set, then the device will wait for the event it sent to be acknowledged
+ * by the host. This option is only relevant for event trap IDs.
+ * Access: RW
+ *
+ * Note: Currently not supported by firmware.
+ */
+MLXSW_ITEM32(reg, hpkt, ack, 0x00, 24, 1);
+
+enum mlxsw_reg_hpkt_action {
+       MLXSW_REG_HPKT_ACTION_FORWARD,
+       MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+       MLXSW_REG_HPKT_ACTION_MIRROR_TO_CPU,
+       MLXSW_REG_HPKT_ACTION_DISCARD,
+       MLXSW_REG_HPKT_ACTION_SOFT_DISCARD,
+       MLXSW_REG_HPKT_ACTION_TRAP_AND_SOFT_DISCARD,
+};
+
+/* reg_hpkt_action
+ * Action to perform on packet when trapped.
+ * 0 - No action. Forward to CPU based on switching rules.
+ * 1 - Trap to CPU (CPU receives sole copy).
+ * 2 - Mirror to CPU (CPU receives a replica of the packet).
+ * 3 - Discard.
+ * 4 - Soft discard (allow other traps to act on the packet).
+ * 5 - Trap and soft discard (allow other traps to overwrite this trap).
+ * Access: RW
+ *
+ * Note: Must be set to 0 (forward) for event trap IDs, as they are already
+ * addressed to the CPU.
+ */
+MLXSW_ITEM32(reg, hpkt, action, 0x00, 20, 3);
+
+/* reg_hpkt_trap_group
+ * Trap group to associate the trap with.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, hpkt, trap_group, 0x00, 12, 6);
+
+/* reg_hpkt_trap_id
+ * Trap ID.
+ * Access: Index
+ *
+ * Note: A trap ID can only be associated with a single trap group. The device
+ * will associate the trap ID with the last trap group configured.
+ */
+MLXSW_ITEM32(reg, hpkt, trap_id, 0x00, 0, 9);
+
+enum {
+       MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT,
+       MLXSW_REG_HPKT_CTRL_PACKET_NO_BUFFER,
+       MLXSW_REG_HPKT_CTRL_PACKET_USE_BUFFER,
+};
+
+/* reg_hpkt_ctrl
+ * Configure dedicated buffer resources for control packets.
+ * 0 - Keep factory defaults.
+ * 1 - Do not use control buffer for this trap ID.
+ * 2 - Use control buffer for this trap ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2);
+
+static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action,
+                                      u8 trap_group, u16 trap_id)
+{
+       MLXSW_REG_ZERO(hpkt, payload);
+       mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED);
+       mlxsw_reg_hpkt_action_set(payload, action);
+       mlxsw_reg_hpkt_trap_group_set(payload, trap_group);
+       mlxsw_reg_hpkt_trap_id_set(payload, trap_id);
+       mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
+}
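+
+/* Illustrative sketch (not part of this patch): trapping STP packets to
+ * the CPU via the RX trap group could look like this:
+ *
+ *     char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ *
+ *     mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ *                         MLXSW_REG_HTGT_TRAP_GROUP_RX, MLXSW_TRAP_ID_STP);
+ *     err = mlxsw_reg_write(core, MLXSW_REG(hpkt), hpkt_pl);
+ */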
+
+static inline const char *mlxsw_reg_id_str(u16 reg_id)
+{
+       switch (reg_id) {
+       case MLXSW_REG_SGCR_ID:
+               return "SGCR";
+       case MLXSW_REG_SPAD_ID:
+               return "SPAD";
+       case MLXSW_REG_SMID_ID:
+               return "SMID";
+       case MLXSW_REG_SPMS_ID:
+               return "SPMS";
+       case MLXSW_REG_SFGC_ID:
+               return "SFGC";
+       case MLXSW_REG_SFTR_ID:
+               return "SFTR";
+       case MLXSW_REG_SPMLR_ID:
+               return "SPMLR";
+       case MLXSW_REG_PMLP_ID:
+               return "PMLP";
+       case MLXSW_REG_PMTU_ID:
+               return "PMTU";
+       case MLXSW_REG_PTYS_ID:
+               return "PTYS";
+       case MLXSW_REG_PPAD_ID:
+               return "PPAD";
+       case MLXSW_REG_PAOS_ID:
+               return "PAOS";
+       case MLXSW_REG_PPCNT_ID:
+               return "PPCNT";
+       case MLXSW_REG_PSPA_ID:
+               return "PSPA";
+       case MLXSW_REG_HTGT_ID:
+               return "HTGT";
+       case MLXSW_REG_HPKT_ID:
+               return "HPKT";
+       default:
+               return "*UNKNOWN*";
+       }
+}
+
+/* PUDE - Port Up / Down Event
+ * ---------------------------
+ * Reports the operational state change of a port.
+ */
+#define MLXSW_REG_PUDE_LEN 0x10
+
+/* reg_pude_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pude, swid, 0x00, 24, 8);
+
+/* reg_pude_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pude, local_port, 0x00, 16, 8);
+
+/* reg_pude_admin_status
+ * Port administrative state (the desired state).
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ *     into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pude, admin_status, 0x00, 8, 4);
+
+/* reg_pude_oper_status
+ * Port operational state.
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not let the
+ *     port up again until explicitly specified by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pude, oper_status, 0x00, 0, 4);
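+
+/* Illustrative sketch (not part of this patch): PUDE is not queried but
+ * delivered as an event payload, so an event handler decodes it with the
+ * getters above:
+ *
+ *     u8 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
+ *     enum mlxsw_reg_pude_oper_status status =
+ *             mlxsw_reg_pude_oper_status_get(pude_pl);
+ */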
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
new file mode 100644 (file)
index 0000000..29b46ee
--- /dev/null
@@ -0,0 +1,1552 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/switchdev.h>
+#include <generated/utsrelease.h>
+
+#include "core.h"
+#include "reg.h"
+#include "port.h"
+#include "trap.h"
+#include "txheader.h"
+
+static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
+static const char mlxsw_sx_driver_version[] = "1.0";
+
+struct mlxsw_sx_port;
+
+#define MLXSW_SW_HW_ID_LEN 6
+
+struct mlxsw_sx {
+       struct mlxsw_sx_port **ports;
+       struct mlxsw_core *core;
+       const struct mlxsw_bus_info *bus_info;
+       u8 hw_id[MLXSW_SW_HW_ID_LEN];
+};
+
+struct mlxsw_sx_port_pcpu_stats {
+       u64                     rx_packets;
+       u64                     rx_bytes;
+       u64                     tx_packets;
+       u64                     tx_bytes;
+       struct u64_stats_sync   syncp;
+       u32                     tx_dropped;
+};
+
+struct mlxsw_sx_port {
+       struct net_device *dev;
+       struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
+       struct mlxsw_sx *mlxsw_sx;
+       u8 local_port;
+};
+
+/* tx_hdr_version
+ * Tx header version.
+ * Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
+
+/* tx_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
+
+/* tx_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
+
+/* tx_hdr_etclass
+ * Egress TClass to be used on the egress device on the egress port.
+ * The MSB is specified in the 'ctclass3' field.
+ * Range is 0-15, where 15 is the highest priority.
+ */
+MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);
+
+/* tx_hdr_swid
+ * Switch partition ID.
+ */
+MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
+
+/* tx_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_hdr_ctclass3
+ * See field 'etclass'.
+ */
+MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);
+
+/* tx_hdr_rdq
+ * RDQ for control packets sent to remote CPU.
+ * Must be set to 0x1F for EMADs, otherwise 0.
+ */
+MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);
+
+/* tx_hdr_cpu_sig
+ * Signature control for packets going to CPU. Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);
+
+/* tx_hdr_sig
+ * Stacking protocol signature. Must be set to 0xE0E0.
+ */
+MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);
+
+/* tx_hdr_stclass
+ * Stacking TClass.
+ */
+MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);
+
+/* tx_hdr_emad
+ * EMAD bit. Must be set for EMADs.
+ */
+MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);
+
+/* tx_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+
+static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
+                                    const struct mlxsw_tx_info *tx_info)
+{
+       char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+       bool is_emad = tx_info->is_emad;
+
+       memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+       /* We currently set default values for the egress tclass (QoS). */
+       mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
+       mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
+       mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+       mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
+                                                 MLXSW_TXHDR_ETCLASS_5);
+       mlxsw_tx_hdr_swid_set(txhdr, 0);
+       mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
+       mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
+       mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
+                                             MLXSW_TXHDR_RDQ_OTHER);
+       mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
+       mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
+       mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
+       mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
+                                              MLXSW_TXHDR_NOT_EMAD);
+       mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+}
+
+static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
+                                         bool is_up)
+{
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char paos_pl[MLXSW_REG_PAOS_LEN];
+
+       mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
+                           is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
+                           MLXSW_PORT_ADMIN_STATUS_DOWN);
+       return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
+}
+
+static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
+                                        bool *p_is_up)
+{
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char paos_pl[MLXSW_REG_PAOS_LEN];
+       u8 oper_status;
+       int err;
+
+       mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
+       err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
+       if (err)
+               return err;
+       oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
+       *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP;
+       return 0;
+}
+
+static int mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port, u16 mtu)
+{
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char pmtu_pl[MLXSW_REG_PMTU_LEN];
+       int max_mtu;
+       int err;
+
+       mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
+       mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
+       err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
+       if (err)
+               return err;
+       max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
+
+       if (mtu > max_mtu)
+               return -EINVAL;
+
+       mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
+       return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
+}
+
+static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
+{
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char pspa_pl[MLXSW_REG_PSPA_LEN];
+
+       mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
+       return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
+}
+
+static int mlxsw_sx_port_module_check(struct mlxsw_sx_port *mlxsw_sx_port,
+                                     bool *p_usable)
+{
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char pmlp_pl[MLXSW_REG_PMLP_LEN];
+       int err;
+
+       mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sx_port->local_port);
+       err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
+       if (err)
+               return err;
+       *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) != 0;
+       return 0;
+}
+
+static int mlxsw_sx_port_open(struct net_device *dev)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+       int err;
+
+       err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
+       if (err)
+               return err;
+       netif_start_queue(dev);
+       return 0;
+}
+
+static int mlxsw_sx_port_stop(struct net_device *dev)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+
+       netif_stop_queue(dev);
+       return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+}
+
+static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
+                                     struct net_device *dev)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
+       const struct mlxsw_tx_info tx_info = {
+               .local_port = mlxsw_sx_port->local_port,
+               .is_emad = false,
+       };
+       struct sk_buff *skb_old = NULL;
+       int err;
+
+       if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
+               struct sk_buff *skb_new;
+
+               skb_old = skb;
+               skb_new = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
+               if (!skb_new) {
+                       this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+                       dev_kfree_skb_any(skb_old);
+                       return NETDEV_TX_OK;
+               }
+               skb = skb_new;
+       }
+       mlxsw_sx_txhdr_construct(skb, &tx_info);
+       err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info);
+       if (err == -EAGAIN) {
+               if (skb_old)
+                       dev_kfree_skb_any(skb);
+               return NETDEV_TX_BUSY;
+       }
+
+       if (skb_old)
+               dev_kfree_skb_any(skb_old);
+
+       if (!err) {
+               pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
+               u64_stats_update_begin(&pcpu_stats->syncp);
+               pcpu_stats->tx_packets++;
+               pcpu_stats->tx_bytes += skb->len;
+               u64_stats_update_end(&pcpu_stats->syncp);
+       } else {
+               this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+               dev_kfree_skb_any(skb);
+       }
+       return NETDEV_TX_OK;
+}
+
+static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+       int err;
+
+       err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
+       if (err)
+               return err;
+       dev->mtu = mtu;
+       return 0;
+}
+
+static struct rtnl_link_stats64 *
+mlxsw_sx_port_get_stats64(struct net_device *dev,
+                         struct rtnl_link_stats64 *stats)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+       struct mlxsw_sx_port_pcpu_stats *p;
+       u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+       u32 tx_dropped = 0;
+       unsigned int start;
+       int i;
+
+       for_each_possible_cpu(i) {
+               p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
+               do {
+                       start = u64_stats_fetch_begin_irq(&p->syncp);
+                       rx_packets      = p->rx_packets;
+                       rx_bytes        = p->rx_bytes;
+                       tx_packets      = p->tx_packets;
+                       tx_bytes        = p->tx_bytes;
+               } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+
+               stats->rx_packets       += rx_packets;
+               stats->rx_bytes         += rx_bytes;
+               stats->tx_packets       += tx_packets;
+               stats->tx_bytes         += tx_bytes;
+               /* tx_dropped is u32, updated without syncp protection. */
+               tx_dropped      += p->tx_dropped;
+       }
+       stats->tx_dropped       = tx_dropped;
+       return stats;
+}
+
+static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
+       .ndo_open               = mlxsw_sx_port_open,
+       .ndo_stop               = mlxsw_sx_port_stop,
+       .ndo_start_xmit         = mlxsw_sx_port_xmit,
+       .ndo_change_mtu         = mlxsw_sx_port_change_mtu,
+       .ndo_get_stats64        = mlxsw_sx_port_get_stats64,
+};
+
+static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
+                                     struct ethtool_drvinfo *drvinfo)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+
+       strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, mlxsw_sx_driver_version,
+               sizeof(drvinfo->version));
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+                "%d.%d.%d",
+                mlxsw_sx->bus_info->fw_rev.major,
+                mlxsw_sx->bus_info->fw_rev.minor,
+                mlxsw_sx->bus_info->fw_rev.subminor);
+       strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
+               sizeof(drvinfo->bus_info));
+}
+
+struct mlxsw_sx_port_hw_stats {
+       char str[ETH_GSTRING_LEN];
+       u64 (*getter)(char *payload);
+};
+
+static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
+       {
+               .str = "a_frames_transmitted_ok",
+               .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
+       },
+       {
+               .str = "a_frames_received_ok",
+               .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
+       },
+       {
+               .str = "a_frame_check_sequence_errors",
+               .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
+       },
+       {
+               .str = "a_alignment_errors",
+               .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
+       },
+       {
+               .str = "a_octets_transmitted_ok",
+               .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
+       },
+       {
+               .str = "a_octets_received_ok",
+               .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
+       },
+       {
+               .str = "a_multicast_frames_xmitted_ok",
+               .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
+       },
+       {
+               .str = "a_broadcast_frames_xmitted_ok",
+               .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
+       },
+       {
+               .str = "a_multicast_frames_received_ok",
+               .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
+       },
+       {
+               .str = "a_broadcast_frames_received_ok",
+               .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
+       },
+       {
+               .str = "a_in_range_length_errors",
+               .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
+       },
+       {
+               .str = "a_out_of_range_length_field",
+               .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
+       },
+       {
+               .str = "a_frame_too_long_errors",
+               .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
+       },
+       {
+               .str = "a_symbol_error_during_carrier",
+               .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
+       },
+       {
+               .str = "a_mac_control_frames_transmitted",
+               .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
+       },
+       {
+               .str = "a_mac_control_frames_received",
+               .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
+       },
+       {
+               .str = "a_unsupported_opcodes_received",
+               .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
+       },
+       {
+               .str = "a_pause_mac_ctrl_frames_received",
+               .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
+       },
+       {
+               .str = "a_pause_mac_ctrl_frames_xmitted",
+               .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
+       },
+};
+
+#define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)
+
+static void mlxsw_sx_port_get_strings(struct net_device *dev,
+                                     u32 stringset, u8 *data)
+{
+       u8 *p = data;
+       int i;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
+                       memcpy(p, mlxsw_sx_port_hw_stats[i].str,
+                              ETH_GSTRING_LEN);
+                       p += ETH_GSTRING_LEN;
+               }
+               break;
+       }
+}
+
+static void mlxsw_sx_port_get_stats(struct net_device *dev,
+                                   struct ethtool_stats *stats, u64 *data)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+       int i;
+       int err;
+
+       mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port);
+       err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
+       for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
+               data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
+}
+
+static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return MLXSW_SX_PORT_HW_STATS_LEN;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+struct mlxsw_sx_port_link_mode {
+       u32 mask;
+       u32 supported;
+       u32 advertised;
+       u32 speed;
+};
+
+static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
+               .supported      = SUPPORTED_100baseT_Full,
+               .advertised     = ADVERTISED_100baseT_Full,
+               .speed          = 100,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
+               .speed          = 100,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
+                                 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
+               .supported      = SUPPORTED_1000baseKX_Full,
+               .advertised     = ADVERTISED_1000baseKX_Full,
+               .speed          = 1000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
+               .supported      = SUPPORTED_10000baseT_Full,
+               .advertised     = ADVERTISED_10000baseT_Full,
+               .speed          = 10000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
+                                 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
+               .supported      = SUPPORTED_10000baseKX4_Full,
+               .advertised     = ADVERTISED_10000baseKX4_Full,
+               .speed          = 10000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+                                 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+                                 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+                                 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
+               .supported      = SUPPORTED_10000baseKR_Full,
+               .advertised     = ADVERTISED_10000baseKR_Full,
+               .speed          = 10000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
+               .supported      = SUPPORTED_20000baseKR2_Full,
+               .advertised     = ADVERTISED_20000baseKR2_Full,
+               .speed          = 20000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
+               .supported      = SUPPORTED_40000baseCR4_Full,
+               .advertised     = ADVERTISED_40000baseCR4_Full,
+               .speed          = 40000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
+               .supported      = SUPPORTED_40000baseKR4_Full,
+               .advertised     = ADVERTISED_40000baseKR4_Full,
+               .speed          = 40000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
+               .supported      = SUPPORTED_40000baseSR4_Full,
+               .advertised     = ADVERTISED_40000baseSR4_Full,
+               .speed          = 40000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
+               .supported      = SUPPORTED_40000baseLR4_Full,
+               .advertised     = ADVERTISED_40000baseLR4_Full,
+               .speed          = 40000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
+                                 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
+                                 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+               .speed          = 25000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
+                                 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
+                                 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
+               .speed          = 50000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+               .supported      = SUPPORTED_56000baseKR4_Full,
+               .advertised     = ADVERTISED_56000baseKR4_Full,
+               .speed          = 56000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
+                                 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+                                 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+                                 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+               .speed          = 100000,
+       },
+};
+
+#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
+
+static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
+{
+       if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+                             MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+               return SUPPORTED_FIBRE;
+
+       if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+                             MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
+               return SUPPORTED_Backplane;
+       return 0;
+}
+
+static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
+{
+       u32 modes = 0;
+       int i;
+
+       for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+               if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
+                       modes |= mlxsw_sx_port_link_mode[i].supported;
+       }
+       return modes;
+}
+
+static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
+{
+       u32 modes = 0;
+       int i;
+
+       for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+               if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
+                       modes |= mlxsw_sx_port_link_mode[i].advertised;
+       }
+       return modes;
+}
+
+static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
+                                           struct ethtool_cmd *cmd)
+{
+       u32 speed = SPEED_UNKNOWN;
+       u8 duplex = DUPLEX_UNKNOWN;
+       int i;
+
+       if (!carrier_ok)
+               goto out;
+
+       for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+               if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
+                       speed = mlxsw_sx_port_link_mode[i].speed;
+                       duplex = DUPLEX_FULL;
+                       break;
+               }
+       }
+out:
+       ethtool_cmd_speed_set(cmd, speed);
+       cmd->duplex = duplex;
+}
+
+static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
+{
+       if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+               return PORT_FIBRE;
+
+       if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
+               return PORT_DA;
+
+       if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+                             MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
+               return PORT_NONE;
+
+       return PORT_OTHER;
+}
+
+static int mlxsw_sx_port_get_settings(struct net_device *dev,
+                                     struct ethtool_cmd *cmd)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char ptys_pl[MLXSW_REG_PTYS_LEN];
+       u32 eth_proto_cap;
+       u32 eth_proto_admin;
+       u32 eth_proto_oper;
+       int err;
+
+       mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+       err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+       if (err) {
+               netdev_err(dev, "Failed to get proto");
+               return err;
+       }
+       mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
+                             &eth_proto_admin, &eth_proto_oper);
+
+       cmd->supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
+                        mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
+                        SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+       cmd->advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
+       mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
+                                       eth_proto_oper, cmd);
+
+       eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+       cmd->port = mlxsw_sx_port_connector_port(eth_proto_oper);
+       cmd->lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);
+
+       cmd->transceiver = XCVR_INTERNAL;
+       return 0;
+}
+
+static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
+{
+       u32 ptys_proto = 0;
+       int i;
+
+       for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+               if (advertising & mlxsw_sx_port_link_mode[i].advertised)
+                       ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
+       }
+       return ptys_proto;
+}
+
+static u32 mlxsw_sx_to_ptys_speed(u32 speed)
+{
+       u32 ptys_proto = 0;
+       int i;
+
+       for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+               if (speed == mlxsw_sx_port_link_mode[i].speed)
+                       ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
+       }
+       return ptys_proto;
+}
+
+static int mlxsw_sx_port_set_settings(struct net_device *dev,
+                                     struct ethtool_cmd *cmd)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char ptys_pl[MLXSW_REG_PTYS_LEN];
+       u32 speed;
+       u32 eth_proto_new;
+       u32 eth_proto_cap;
+       u32 eth_proto_admin;
+       bool is_up;
+       int err;
+
+       speed = ethtool_cmd_speed(cmd);
+
+       eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
+               mlxsw_sx_to_ptys_advert_link(cmd->advertising) :
+               mlxsw_sx_to_ptys_speed(speed);
+
+       mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+       err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+       if (err) {
+               netdev_err(dev, "Failed to get proto");
+               return err;
+       }
+       mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
+
+       eth_proto_new = eth_proto_new & eth_proto_cap;
+       if (!eth_proto_new) {
+               netdev_err(dev, "Not supported proto admin requested");
+               return -EINVAL;
+       }
+       if (eth_proto_new == eth_proto_admin)
+               return 0;
+
+       mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, eth_proto_new);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+       if (err) {
+               netdev_err(dev, "Failed to set proto admin");
+               return err;
+       }
+
+       err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
+       if (err) {
+               netdev_err(dev, "Failed to get oper status");
+               return err;
+       }
+       if (!is_up)
+               return 0;
+
+       err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+       if (err) {
+               netdev_err(dev, "Failed to set admin status");
+               return err;
+       }
+
+       err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
+       if (err) {
+               netdev_err(dev, "Failed to set admin status");
+               return err;
+       }
+
+       return 0;
+}
+
+static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
+       .get_drvinfo            = mlxsw_sx_port_get_drvinfo,
+       .get_link               = ethtool_op_get_link,
+       .get_strings            = mlxsw_sx_port_get_strings,
+       .get_ethtool_stats      = mlxsw_sx_port_get_stats,
+       .get_sset_count         = mlxsw_sx_port_get_sset_count,
+       .get_settings           = mlxsw_sx_port_get_settings,
+       .set_settings           = mlxsw_sx_port_set_settings,
+};
+
+static int mlxsw_sx_port_attr_get(struct net_device *dev,
+                                 struct switchdev_attr *attr)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+
+       switch (attr->id) {
+       case SWITCHDEV_ATTR_PORT_PARENT_ID:
+               attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
+               memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
+       .switchdev_port_attr_get        = mlxsw_sx_port_attr_get,
+};
+
+static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
+{
+       char spad_pl[MLXSW_REG_SPAD_LEN];
+       int err;
+
+       err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
+       if (err)
+               return err;
+       mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
+       return 0;
+}
+
+static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
+{
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       struct net_device *dev = mlxsw_sx_port->dev;
+       char ppad_pl[MLXSW_REG_PPAD_LEN];
+       int err;
+
+       mlxsw_reg_ppad_pack(ppad_pl, false, 0);
+       err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
+       if (err)
+               return err;
+       mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
+       /* The last byte value in the base mac address is guaranteed
+        * not to overflow when the local_port value is added to it.
+        */
+       dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
+       return 0;
+}
+
+static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
+                                      u16 vid, enum mlxsw_reg_spms_state state)
+{
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char *spms_pl;
+       int err;
+
+       spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+       if (!spms_pl)
+               return -ENOMEM;
+       mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port, vid, state);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
+       kfree(spms_pl);
+       return err;
+}
+
+static int mlxsw_sx_port_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
+                                  u32 speed)
+{
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char ptys_pl[MLXSW_REG_PTYS_LEN];
+
+       mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, speed);
+       return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+}
+
+static int
+mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
+                                   enum mlxsw_reg_spmlr_learn_mode mode)
+{
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char spmlr_pl[MLXSW_REG_SPMLR_LEN];
+
+       mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
+       return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
+}
+
+static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port;
+       struct net_device *dev;
+       bool usable;
+       int err;
+
+       dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
+       if (!dev)
+               return -ENOMEM;
+       mlxsw_sx_port = netdev_priv(dev);
+       mlxsw_sx_port->dev = dev;
+       mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
+       mlxsw_sx_port->local_port = local_port;
+
+       mlxsw_sx_port->pcpu_stats =
+               netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
+       if (!mlxsw_sx_port->pcpu_stats) {
+               err = -ENOMEM;
+               goto err_alloc_stats;
+       }
+
+       dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
+       dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
+       dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops;
+
+       err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
+                       mlxsw_sx_port->local_port);
+               goto err_dev_addr_get;
+       }
+
+       netif_carrier_off(dev);
+
+       dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
+                        NETIF_F_VLAN_CHALLENGED;
+
+       /* Each packet needs to have a Tx header (metadata) on top of all
+        * other headers.
+        */
+       dev->hard_header_len += MLXSW_TXHDR_LEN;
+
+       err = mlxsw_sx_port_module_check(mlxsw_sx_port, &usable);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to check module\n",
+                       mlxsw_sx_port->local_port);
+               goto err_port_module_check;
+       }
+
+       if (!usable) {
+               dev_dbg(mlxsw_sx->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
+                       mlxsw_sx_port->local_port);
+               goto port_not_usable;
+       }
+
+       err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
+                       mlxsw_sx_port->local_port);
+               goto err_port_swid_set;
+       }
+
+       err = mlxsw_sx_port_speed_set(mlxsw_sx_port,
+                                     MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
+                       mlxsw_sx_port->local_port);
+               goto err_port_speed_set;
+       }
+
+       err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, ETH_DATA_LEN);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
+                       mlxsw_sx_port->local_port);
+               goto err_port_mtu_set;
+       }
+
+       err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+       if (err)
+               goto err_port_admin_status_set;
+
+       err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
+                                         MLXSW_PORT_DEFAULT_VID,
+                                         MLXSW_REG_SPMS_STATE_FORWARDING);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
+                       mlxsw_sx_port->local_port);
+               goto err_port_stp_state_set;
+       }
+
+       err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
+                                                 MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
+                       mlxsw_sx_port->local_port);
+               goto err_port_mac_learning_mode_set;
+       }
+
+       err = register_netdev(dev);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
+                       mlxsw_sx_port->local_port);
+               goto err_register_netdev;
+       }
+
+       mlxsw_sx->ports[local_port] = mlxsw_sx_port;
+       return 0;
+
+err_register_netdev:
+err_port_admin_status_set:
+err_port_mac_learning_mode_set:
+err_port_stp_state_set:
+err_port_mtu_set:
+err_port_speed_set:
+err_port_swid_set:
+port_not_usable:
+err_port_module_check:
+err_dev_addr_get:
+       free_percpu(mlxsw_sx_port->pcpu_stats);
+err_alloc_stats:
+       free_netdev(dev);
+       return err;
+}
+
+static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
+
+       if (!mlxsw_sx_port)
+               return;
+       unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
+       mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
+       free_percpu(mlxsw_sx_port->pcpu_stats);
+}
+
+static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
+{
+       int i;
+
+       for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+               mlxsw_sx_port_remove(mlxsw_sx, i);
+       kfree(mlxsw_sx->ports);
+}
+
+static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
+{
+       size_t alloc_size;
+       int i;
+       int err;
+
+       alloc_size = sizeof(struct mlxsw_sx_port *) * MLXSW_PORT_MAX_PORTS;
+       mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
+       if (!mlxsw_sx->ports)
+               return -ENOMEM;
+
+       for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
+               err = mlxsw_sx_port_create(mlxsw_sx, i);
+               if (err)
+                       goto err_port_create;
+       }
+       return 0;
+
+err_port_create:
+       for (i--; i >= 1; i--)
+               mlxsw_sx_port_remove(mlxsw_sx, i);
+       kfree(mlxsw_sx->ports);
+       return err;
+}
+
+static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
+                                    char *pude_pl, void *priv)
+{
+       struct mlxsw_sx *mlxsw_sx = priv;
+       struct mlxsw_sx_port *mlxsw_sx_port;
+       enum mlxsw_reg_pude_oper_status status;
+       u8 local_port;
+
+       local_port = mlxsw_reg_pude_local_port_get(pude_pl);
+       mlxsw_sx_port = mlxsw_sx->ports[local_port];
+       if (!mlxsw_sx_port) {
+               dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
+                        local_port);
+               return;
+       }
+
+       status = mlxsw_reg_pude_oper_status_get(pude_pl);
+       if (status == MLXSW_PORT_OPER_STATUS_UP) {
+               netdev_info(mlxsw_sx_port->dev, "link up\n");
+               netif_carrier_on(mlxsw_sx_port->dev);
+       } else {
+               netdev_info(mlxsw_sx_port->dev, "link down\n");
+               netif_carrier_off(mlxsw_sx_port->dev);
+       }
+}
+
+static struct mlxsw_event_listener mlxsw_sx_pude_event = {
+       .func = mlxsw_sx_pude_event_func,
+       .trap_id = MLXSW_TRAP_ID_PUDE,
+};
+
+static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx,
+                                  enum mlxsw_event_trap_id trap_id)
+{
+       struct mlxsw_event_listener *el;
+       char hpkt_pl[MLXSW_REG_HPKT_LEN];
+       int err;
+
+       switch (trap_id) {
+       case MLXSW_TRAP_ID_PUDE:
+               el = &mlxsw_sx_pude_event;
+               break;
+       }
+       err = mlxsw_core_event_listener_register(mlxsw_sx->core, el, mlxsw_sx);
+       if (err)
+               return err;
+
+       mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+                           MLXSW_REG_HTGT_TRAP_GROUP_EMAD, trap_id);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+       if (err)
+               goto err_event_trap_set;
+
+       return 0;
+
+err_event_trap_set:
+       mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
+       return err;
+}
+
+static void mlxsw_sx_event_unregister(struct mlxsw_sx *mlxsw_sx,
+                                     enum mlxsw_event_trap_id trap_id)
+{
+       struct mlxsw_event_listener *el;
+
+       switch (trap_id) {
+       case MLXSW_TRAP_ID_PUDE:
+               el = &mlxsw_sx_pude_event;
+               break;
+       }
+       mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
+}
+
+static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
+                                     void *priv)
+{
+       struct mlxsw_sx *mlxsw_sx = priv;
+       struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
+       struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
+
+       if (unlikely(!mlxsw_sx_port)) {
+               if (net_ratelimit())
+                       dev_warn(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
+                                local_port);
+               return;
+       }
+
+       skb->dev = mlxsw_sx_port->dev;
+
+       pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
+       u64_stats_update_begin(&pcpu_stats->syncp);
+       pcpu_stats->rx_packets++;
+       pcpu_stats->rx_bytes += skb->len;
+       u64_stats_update_end(&pcpu_stats->syncp);
+
+       skb->protocol = eth_type_trans(skb, skb->dev);
+       netif_receive_skb(skb);
+}
+
+static const struct mlxsw_rx_listener mlxsw_sx_rx_listener[] = {
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_FDB_MC,
+       },
+       /* Traps for specific L2 packet types, not trapped as FDB MC */
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_STP,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_LACP,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_EAPOL,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_LLDP,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_MMRP,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_MVRP,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_RPVST,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_DHCP,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
+       },
+};
+
+static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
+{
+       char htgt_pl[MLXSW_REG_HTGT_LEN];
+       char hpkt_pl[MLXSW_REG_HPKT_LEN];
+       int i;
+       int err;
+
+       mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
+       if (err)
+               return err;
+
+       for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
+               err = mlxsw_core_rx_listener_register(mlxsw_sx->core,
+                                                     &mlxsw_sx_rx_listener[i],
+                                                     mlxsw_sx);
+               if (err)
+                       goto err_rx_listener_register;
+
+               mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+                                   MLXSW_REG_HTGT_TRAP_GROUP_RX,
+                                   mlxsw_sx_rx_listener[i].trap_id);
+               err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+               if (err)
+                       goto err_rx_trap_set;
+       }
+       return 0;
+
+err_rx_trap_set:
+       mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+                                         &mlxsw_sx_rx_listener[i],
+                                         mlxsw_sx);
+err_rx_listener_register:
+       for (i--; i >= 0; i--) {
+               mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+                                   MLXSW_REG_HTGT_TRAP_GROUP_RX,
+                                   mlxsw_sx_rx_listener[i].trap_id);
+               mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+
+               mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+                                                 &mlxsw_sx_rx_listener[i],
+                                                 mlxsw_sx);
+       }
+       return err;
+}
+
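The init routine above registers a listener and then programs the trap action for each table entry; on failure it unwinds in reverse, resetting each trap action to FORWARD (the hardware default) before unregistering the listener. Note the asymmetry between the two error labels: err_rx_trap_set first unregisters the listener of the entry that just failed, then falls through into the loop that undoes the fully initialized entries. A minimal sketch of the underlying idiom, with do_step()/undo_step() as hypothetical stand-ins for register-plus-trap and forward-plus-unregister:

/* Sketch of the unwind idiom used by mlxsw_sx_traps_init(); do_step()
 * and undo_step() are hypothetical stand-ins, not driver functions.
 */
static int setup_all(int n)
{
        int i, err;

        for (i = 0; i < n; i++) {
                err = do_step(i);
                if (err)
                        goto err_step;
        }
        return 0;

err_step:
        for (i--; i >= 0; i--)
                undo_step(i);
        return err;
}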
+static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
+{
+       char hpkt_pl[MLXSW_REG_HPKT_LEN];
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
+               mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+                                   MLXSW_REG_HTGT_TRAP_GROUP_RX,
+                                   mlxsw_sx_rx_listener[i].trap_id);
+               mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+
+               mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+                                                 &mlxsw_sx_rx_listener[i],
+                                                 mlxsw_sx);
+       }
+}
+
+static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
+{
+       char sfgc_pl[MLXSW_REG_SFGC_LEN];
+       char sgcr_pl[MLXSW_REG_SGCR_LEN];
+       char *smid_pl;
+       char *sftr_pl;
+       int err;
+
+       /* Due to a FW bug, we must configure SMID. */
+       smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
+       if (!smid_pl)
+               return -ENOMEM;
+       mlxsw_reg_smid_pack(smid_pl, MLXSW_PORT_MID);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(smid), smid_pl);
+       kfree(smid_pl);
+       if (err)
+               return err;
+
+       /* Configure a flooding table, which includes only CPU port. */
+       sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+       if (!sftr_pl)
+               return -ENOMEM;
+       mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
+       kfree(sftr_pl);
+       if (err)
+               return err;
+
+       /* Flood different packet types using the flooding table. */
+       mlxsw_reg_sfgc_pack(sfgc_pl,
+                           MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
+                           MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+                           MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+                           0);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+       if (err)
+               return err;
+
+       mlxsw_reg_sfgc_pack(sfgc_pl,
+                           MLXSW_REG_SFGC_TYPE_BROADCAST,
+                           MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+                           MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+                           0);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+       if (err)
+               return err;
+
+       mlxsw_reg_sfgc_pack(sfgc_pl,
+                           MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
+                           MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+                           MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+                           0);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+       if (err)
+               return err;
+
+       mlxsw_reg_sfgc_pack(sfgc_pl,
+                           MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
+                           MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+                           MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+                           0);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+       if (err)
+               return err;
+
+       mlxsw_reg_sfgc_pack(sfgc_pl,
+                           MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
+                           MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+                           MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+                           0);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+       if (err)
+               return err;
+
+       mlxsw_reg_sgcr_pack(sgcr_pl, true);
+       return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
+}
+
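mlxsw_sx_flood_init() above repeats the same pack-and-write sequence for five SFGC packet types, varying only the type argument. A table-driven form would remove the duplication; this is a sketch only, not the committed code (only the MLXSW_REG_SFGC_TYPE_* values appear above, so the array element type is kept generic):

/* Sketch: table-driven replacement for the five SFGC writes above */
static const int mlxsw_sx_sfgc_types[] = {
        MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
        MLXSW_REG_SFGC_TYPE_BROADCAST,
        MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
        MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
        MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
};

for (i = 0; i < ARRAY_SIZE(mlxsw_sx_sfgc_types); i++) {
        mlxsw_reg_sfgc_pack(sfgc_pl, mlxsw_sx_sfgc_types[i],
                            MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
                            MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0);
        err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
        if (err)
                return err;
}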
+static int mlxsw_sx_init(void *priv, struct mlxsw_core *mlxsw_core,
+                        const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+       struct mlxsw_sx *mlxsw_sx = priv;
+       int err;
+
+       mlxsw_sx->core = mlxsw_core;
+       mlxsw_sx->bus_info = mlxsw_bus_info;
+
+       err = mlxsw_sx_hw_id_get(mlxsw_sx);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
+               return err;
+       }
+
+       err = mlxsw_sx_ports_create(mlxsw_sx);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
+               return err;
+       }
+
+       err = mlxsw_sx_event_register(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Failed to register for PUDE events\n");
+               goto err_event_register;
+       }
+
+       err = mlxsw_sx_traps_init(mlxsw_sx);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps for RX\n");
+               goto err_rx_listener_register;
+       }
+
+       err = mlxsw_sx_flood_init(mlxsw_sx);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
+               goto err_flood_init;
+       }
+
+       return 0;
+
+err_flood_init:
+       mlxsw_sx_traps_fini(mlxsw_sx);
+err_rx_listener_register:
+       mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+err_event_register:
+       mlxsw_sx_ports_remove(mlxsw_sx);
+       return err;
+}
+
+static void mlxsw_sx_fini(void *priv)
+{
+       struct mlxsw_sx *mlxsw_sx = priv;
+
+       mlxsw_sx_traps_fini(mlxsw_sx);
+       mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+       mlxsw_sx_ports_remove(mlxsw_sx);
+}
+
+static struct mlxsw_config_profile mlxsw_sx_config_profile = {
+       .used_max_vepa_channels         = 1,
+       .max_vepa_channels              = 0,
+       .used_max_lag                   = 1,
+       .max_lag                        = 64,
+       .used_max_port_per_lag          = 1,
+       .max_port_per_lag               = 16,
+       .used_max_mid                   = 1,
+       .max_mid                        = 7000,
+       .used_max_pgt                   = 1,
+       .max_pgt                        = 0,
+       .used_max_system_port           = 1,
+       .max_system_port                = 48000,
+       .used_max_vlan_groups           = 1,
+       .max_vlan_groups                = 127,
+       .used_max_regions               = 1,
+       .max_regions                    = 400,
+       .used_flood_tables              = 1,
+       .max_flood_tables               = 2,
+       .max_vid_flood_tables           = 1,
+       .used_flood_mode                = 1,
+       .flood_mode                     = 3,
+       .used_max_ib_mc                 = 1,
+       .max_ib_mc                      = 0,
+       .used_max_pkey                  = 1,
+       .max_pkey                       = 0,
+       .swid_config                    = {
+               {
+                       .used_type      = 1,
+                       .type           = MLXSW_PORT_SWID_TYPE_ETH,
+               }
+       },
+};
+
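In the profile above, each used_* field is a flag telling the bus layer whether to program the max_* value paired with it, so a pair such as .used_max_pkey = 1 / .max_pkey = 0 requests that the resource be explicitly set to zero rather than left at the firmware default. A schematic consumer (the setter name is illustrative, not the driver's actual API):

/* Schematic: a used_* flag gates its paired value when the profile is
 * applied; config_profile_set_max_pkey() is an illustrative name only.
 */
if (profile->used_max_pkey)
        config_profile_set_max_pkey(mbox, profile->max_pkey);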
+static struct mlxsw_driver mlxsw_sx_driver = {
+       .kind                   = MLXSW_DEVICE_KIND_SWITCHX2,
+       .owner                  = THIS_MODULE,
+       .priv_size              = sizeof(struct mlxsw_sx),
+       .init                   = mlxsw_sx_init,
+       .fini                   = mlxsw_sx_fini,
+       .txhdr_construct        = mlxsw_sx_txhdr_construct,
+       .txhdr_len              = MLXSW_TXHDR_LEN,
+       .profile                = &mlxsw_sx_config_profile,
+};
+
+static int __init mlxsw_sx_module_init(void)
+{
+       return mlxsw_core_driver_register(&mlxsw_sx_driver);
+}
+
+static void __exit mlxsw_sx_module_exit(void)
+{
+       mlxsw_core_driver_unregister(&mlxsw_sx_driver);
+}
+
+module_init(mlxsw_sx_module_init);
+module_exit(mlxsw_sx_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
+MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
new file mode 100644 (file)
index 0000000..53a9550
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/trap.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXSW_TRAP_H
+#define _MLXSW_TRAP_H
+
+enum {
+       /* Ethernet EMAD and FDB miss */
+       MLXSW_TRAP_ID_FDB_MC = 0x01,
+       MLXSW_TRAP_ID_ETHEMAD = 0x05,
+       /* L2 traps for specific packet types */
+       MLXSW_TRAP_ID_STP = 0x10,
+       MLXSW_TRAP_ID_LACP = 0x11,
+       MLXSW_TRAP_ID_EAPOL = 0x12,
+       MLXSW_TRAP_ID_LLDP = 0x13,
+       MLXSW_TRAP_ID_MMRP = 0x14,
+       MLXSW_TRAP_ID_MVRP = 0x15,
+       MLXSW_TRAP_ID_RPVST = 0x16,
+       MLXSW_TRAP_ID_DHCP = 0x19,
+       MLXSW_TRAP_ID_IGMP_QUERY = 0x30,
+       MLXSW_TRAP_ID_IGMP_V1_REPORT = 0x31,
+       MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32,
+       MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
+       MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
+
+       MLXSW_TRAP_ID_MAX = 0x1FF
+};
+
+enum mlxsw_event_trap_id {
+       /* Port Up/Down event generated by hardware */
+       MLXSW_TRAP_ID_PUDE = 0x8,
+};
+
+#endif /* _MLXSW_TRAP_H */
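The trap IDs group by function: 0x10-0x16 cover the L2 control protocols (STP through R-PVST), 0x19 DHCP, and 0x30-0x34 the IGMP family; event traps such as PUDE live in a separate enum because they report hardware events rather than packets. Wiring one of these IDs to a handler follows the listener pattern already used in the switchx2 driver above; a fragment-level sketch (my_rx_handler is hypothetical):

/* Sketch: trap LLDP frames to the CPU, reusing the pattern from
 * mlxsw_sx_traps_init() above; my_rx_handler() is hypothetical.
 */
static const struct mlxsw_rx_listener lldp_listener = {
        .func = my_rx_handler,
        .local_port = MLXSW_PORT_DONT_CARE,
        .trap_id = MLXSW_TRAP_ID_LLDP,
};

err = mlxsw_core_rx_listener_register(mlxsw_core, &lldp_listener, priv);
if (!err) {
        mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
                            MLXSW_REG_HTGT_TRAP_GROUP_RX,
                            MLXSW_TRAP_ID_LLDP);
        err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
}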
diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
new file mode 100644 (file)
index 0000000..06fc46c
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/txheader.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_TXHEADER_H
+#define _MLXSW_TXHEADER_H
+
+#define MLXSW_TXHDR_LEN 0x10
+#define MLXSW_TXHDR_VERSION_0 0
+
+enum {
+       MLXSW_TXHDR_ETH_CTL,
+       MLXSW_TXHDR_ETH_DATA,
+};
+
+#define MLXSW_TXHDR_PROTO_ETH 1
+
+enum {
+       MLXSW_TXHDR_ETCLASS_0,
+       MLXSW_TXHDR_ETCLASS_1,
+       MLXSW_TXHDR_ETCLASS_2,
+       MLXSW_TXHDR_ETCLASS_3,
+       MLXSW_TXHDR_ETCLASS_4,
+       MLXSW_TXHDR_ETCLASS_5,
+       MLXSW_TXHDR_ETCLASS_6,
+       MLXSW_TXHDR_ETCLASS_7,
+};
+
+enum {
+       MLXSW_TXHDR_RDQ_OTHER,
+       MLXSW_TXHDR_RDQ_EMAD = 0x1f,
+};
+
+#define MLXSW_TXHDR_CTCLASS3 0
+#define MLXSW_TXHDR_CPU_SIG 0
+#define MLXSW_TXHDR_SIG 0xE0E0
+#define MLXSW_TXHDR_STCLASS_NONE 0
+
+enum {
+       MLXSW_TXHDR_NOT_EMAD,
+       MLXSW_TXHDR_EMAD,
+};
+
+enum {
+       MLXSW_TXHDR_TYPE_DATA,
+       MLXSW_TXHDR_TYPE_CONTROL = 6,
+};
+
+#endif
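Every frame handed to the device is prefixed with this 16-byte TX header; the constants above name the field values (control vs. data traffic class, the EMAD receive queue, signatures), while the actual bit packing lives in each driver's txhdr_construct callback (mlxsw_sx_txhdr_construct, wired up through struct mlxsw_driver above). A sketch of the headroom handling such a callback needs, using standard skb helpers; the surrounding xmit logic is assumed:

/* Sketch: reserve and zero MLXSW_TXHDR_LEN bytes of headroom before
 * packing the header fields; skb_cow_head()/skb_push() are standard
 * kernel helpers, everything around them is assumed context.
 */
if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}
memset(skb_push(skb, MLXSW_TXHDR_LEN), 0, MLXSW_TXHDR_LEN);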
index c28111749e1f9ba95c8b49231da95f97278f31e8..2d1b9427407982b43673e96a085c7a9ff69e0a53 100644 (file)
@@ -8226,31 +8226,7 @@ static void s2io_rem_nic(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
 
-/**
- * s2io_starter - Entry point for the driver
- * Description: This function is the entry point for the driver. It verifies
- * the module loadable parameters and initializes PCI configuration space.
- */
-
-static int __init s2io_starter(void)
-{
-       return pci_register_driver(&s2io_driver);
-}
-
-/**
- * s2io_closer - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver. It
- * unregisters the driver.
- */
-
-static __exit void s2io_closer(void)
-{
-       pci_unregister_driver(&s2io_driver);
-       DBG_PRINT(INIT_DBG, "cleanup done\n");
-}
-
-module_init(s2io_starter);
-module_exit(s2io_closer);
+module_pci_driver(s2io_driver);
 
 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
                                struct tcphdr **tcp, struct RxD_t *rxdp,
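The hand-rolled s2io_starter()/s2io_closer() pair is pure boilerplate, which is exactly what module_pci_driver() generates: from include/linux/pci.h it expands via module_driver() to the same __init/__exit functions plus the module_init()/module_exit() hooks. The only behavioral change is the loss of the "cleanup done" debug print on unload.

/* include/linux/pci.h: the macro the patch switches to */
#define module_pci_driver(__pci_driver) \
        module_driver(__pci_driver, pci_register_driver, \
                       pci_unregister_driver)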
index d89b6ed82c51ac37d18dda4bfdd9937a0605c7f7..6c5997dc8afc064076e1a00ff726b3d87d3515c9 100644 (file)
@@ -1085,8 +1085,6 @@ static void s2io_txpic_intr_handle(struct s2io_nic *sp);
 static void tx_intr_handler(struct fifo_info *fifo_data);
 static void s2io_handle_errors(void * dev_id);
 
-static int s2io_starter(void);
-static void s2io_closer(void);
 static void s2io_tx_watchdog(struct net_device *dev);
 static void s2io_set_multicast(struct net_device *dev);
 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
index 33669c29b341cb42bb106ec2c634663d0adb2415..753ea8bad953c3a75487e3a544cc4106d4a46f68 100644 (file)
@@ -1415,7 +1415,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
        if (fw->size & 0xF) {
                addr = dest + size;
                for (i = 0; i < (fw->size & 0xF); i++)
-                       data[i] = temp[size + i];
+                       data[i] = ((u8 *)temp)[size + i];
                for (; i < 16; i++)
                        data[i] = 0;
                ret = qlcnic_ms_mem_write128(adapter, addr,
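The cast matters because array indexing scales by the pointee size: temp is a wider-than-byte pointer here (the added cast would be a no-op otherwise), so the old temp[size + i] stepped several bytes per increment and read past the intended tail of the firmware image, while ((u8 *)temp)[size + i] steps one byte at a time. A self-contained userspace illustration:

/* Why the (u8 *) cast fixes the copy: p[i] on a u32 pointer is
 * i * 4 bytes past the start, not i bytes.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t words[2] = { 0x03020100u, 0x07060504u };
        uint8_t *bytes = (uint8_t *)words;

        assert((char *)&words[1] - (char *)&words[0] == 4);
        assert(bytes[4] == 0x04);       /* little-endian layout assumed */
        return 0;
}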
index 2f6cc423ab1dff21cf810c5e6737af58dbae0686..7dbab3c20db5811d333f8f817769b44b79d57bbf 100644 (file)
@@ -2403,7 +2403,6 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
                        qlcnic_free_tx_rings(adapter);
                        return -ENOMEM;
                }
-               memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
                tx_ring->cmd_buf_arr = cmd_buf_arr;
                spin_lock_init(&tx_ring->tx_clean_lock);
        }
index 8aa50ac4e2d619ea62155442c34df76965133173..a157aaaaff6a183b161f5d7469fd8a7602be729d 100644 (file)
@@ -658,6 +658,8 @@ struct ravb_desc {
        __le32 dptr;    /* Descriptor pointer */
 };
 
+#define DPTR_ALIGN     4       /* Required descriptor pointer alignment */
+
 enum DIE_DT {
        /* Frame data */
        DT_FMID         = 0x40,
@@ -739,6 +741,7 @@ enum RAVB_QUEUE {
 #define RX_QUEUE_OFFSET        4
 #define NUM_RX_QUEUE   2
 #define NUM_TX_QUEUE   2
+#define NUM_TX_DESC    2       /* TX descriptors per packet */
 
 struct ravb_tstamp_skb {
        struct list_head list;
@@ -777,9 +780,9 @@ struct ravb_private {
        dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
        struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
        struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
+       void *tx_align[NUM_TX_QUEUE];
        struct sk_buff **rx_skb[NUM_RX_QUEUE];
        struct sk_buff **tx_skb[NUM_TX_QUEUE];
-       void **tx_buffers[NUM_TX_QUEUE];
        u32 rx_over_errors;
        u32 rx_fifo_errors;
        struct net_device_stats stats[NUM_RX_QUEUE];
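The new NUM_TX_DESC = 2 scheme gives every packet a descriptor pair: the first descriptor carries only the few bytes needed to bring skb->data up to DPTR_ALIGN alignment, bounced through the per-queue tx_align area, while the second points at the remainder of the skb in place. That is why the old per-packet tx_buffers copies can be replaced by one small tx_align allocation. The split, as computed in ravb_start_xmit() further down:

/* Head/tail split behind the 2-descriptor TX scheme (mirrors
 * ravb_start_xmit() below): only the unaligned head is copied.
 */
u32 head = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; /* 0..3 bytes */
u32 tail = skb->len - head;     /* mapped for DMA straight from the skb */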
index fd9745714d903fa5945956709491be934ea02c67..3d972d8194200a693e91082f687bddc18a14ca7d 100644 (file)
@@ -195,12 +195,8 @@ static void ravb_ring_free(struct net_device *ndev, int q)
        priv->tx_skb[q] = NULL;
 
        /* Free aligned TX buffers */
-       if (priv->tx_buffers[q]) {
-               for (i = 0; i < priv->num_tx_ring[q]; i++)
-                       kfree(priv->tx_buffers[q][i]);
-       }
-       kfree(priv->tx_buffers[q]);
-       priv->tx_buffers[q] = NULL;
+       kfree(priv->tx_align[q]);
+       priv->tx_align[q] = NULL;
 
        if (priv->rx_ring[q]) {
                ring_size = sizeof(struct ravb_ex_rx_desc) *
@@ -212,7 +208,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 
        if (priv->tx_ring[q]) {
                ring_size = sizeof(struct ravb_tx_desc) *
-                           (priv->num_tx_ring[q] + 1);
+                           (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
                dma_free_coherent(NULL, ring_size, priv->tx_ring[q],
                                  priv->tx_desc_dma[q]);
                priv->tx_ring[q] = NULL;
@@ -223,14 +219,13 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 static void ravb_ring_format(struct net_device *ndev, int q)
 {
        struct ravb_private *priv = netdev_priv(ndev);
-       struct ravb_ex_rx_desc *rx_desc = NULL;
-       struct ravb_tx_desc *tx_desc = NULL;
-       struct ravb_desc *desc = NULL;
+       struct ravb_ex_rx_desc *rx_desc;
+       struct ravb_tx_desc *tx_desc;
+       struct ravb_desc *desc;
        int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
-       int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
-       struct sk_buff *skb;
+       int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
+                          NUM_TX_DESC;
        dma_addr_t dma_addr;
-       void *buffer;
        int i;
 
        priv->cur_rx[q] = 0;
@@ -241,45 +236,33 @@ static void ravb_ring_format(struct net_device *ndev, int q)
        memset(priv->rx_ring[q], 0, rx_ring_size);
        /* Build RX ring buffer */
        for (i = 0; i < priv->num_rx_ring[q]; i++) {
-               priv->rx_skb[q][i] = NULL;
-               skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
-               if (!skb)
-                       break;
-               ravb_set_buffer_align(skb);
                /* RX descriptor */
                rx_desc = &priv->rx_ring[q][i];
                /* The size of the buffer should be on 16-byte boundary. */
                rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
-               dma_addr = dma_map_single(&ndev->dev, skb->data,
+               dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
                                          ALIGN(PKT_BUF_SZ, 16),
                                          DMA_FROM_DEVICE);
-               if (dma_mapping_error(&ndev->dev, dma_addr)) {
-                       dev_kfree_skb(skb);
-                       break;
-               }
-               priv->rx_skb[q][i] = skb;
+               /* We just set the data size to 0 for a failed mapping which
+                * should prevent DMA from happening...
+                */
+               if (dma_mapping_error(&ndev->dev, dma_addr))
+                       rx_desc->ds_cc = cpu_to_le16(0);
                rx_desc->dptr = cpu_to_le32(dma_addr);
                rx_desc->die_dt = DT_FEMPTY;
        }
        rx_desc = &priv->rx_ring[q][i];
        rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
        rx_desc->die_dt = DT_LINKFIX; /* type */
-       priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]);
 
        memset(priv->tx_ring[q], 0, tx_ring_size);
        /* Build TX ring buffer */
-       for (i = 0; i < priv->num_tx_ring[q]; i++) {
-               priv->tx_skb[q][i] = NULL;
-               priv->tx_buffers[q][i] = NULL;
-               buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
-               if (!buffer)
-                       break;
-               /* Aligned TX buffer */
-               priv->tx_buffers[q][i] = buffer;
-               tx_desc = &priv->tx_ring[q][i];
+       for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
+            i++, tx_desc++) {
+               tx_desc->die_dt = DT_EEMPTY;
+               tx_desc++;
                tx_desc->die_dt = DT_EEMPTY;
        }
-       tx_desc = &priv->tx_ring[q][i];
        tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
        tx_desc->die_dt = DT_LINKFIX; /* type */
 
@@ -298,7 +281,9 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 static int ravb_ring_init(struct net_device *ndev, int q)
 {
        struct ravb_private *priv = netdev_priv(ndev);
+       struct sk_buff *skb;
        int ring_size;
+       int i;
 
        /* Allocate RX and TX skb rings */
        priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -308,10 +293,18 @@ static int ravb_ring_init(struct net_device *ndev, int q)
        if (!priv->rx_skb[q] || !priv->tx_skb[q])
                goto error;
 
+       for (i = 0; i < priv->num_rx_ring[q]; i++) {
+               skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
+               if (!skb)
+                       goto error;
+               ravb_set_buffer_align(skb);
+               priv->rx_skb[q][i] = skb;
+       }
+
        /* Allocate rings for the aligned buffers */
-       priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
-                                     sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
-       if (!priv->tx_buffers[q])
+       priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
+                                   DPTR_ALIGN - 1, GFP_KERNEL);
+       if (!priv->tx_align[q])
                goto error;
 
        /* Allocate all RX descriptors. */
@@ -325,7 +318,8 @@ static int ravb_ring_init(struct net_device *ndev, int q)
        priv->dirty_rx[q] = 0;
 
        /* Allocate all TX descriptors. */
-       ring_size = sizeof(struct ravb_tx_desc) * (priv->num_tx_ring[q] + 1);
+       ring_size = sizeof(struct ravb_tx_desc) *
+                   (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
        priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size,
                                              &priv->tx_desc_dma[q],
                                              GFP_KERNEL);
@@ -435,11 +429,12 @@ static int ravb_tx_free(struct net_device *ndev, int q)
        struct net_device_stats *stats = &priv->stats[q];
        struct ravb_tx_desc *desc;
        int free_num = 0;
-       int entry = 0;
+       int entry;
        u32 size;
 
        for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
-               entry = priv->dirty_tx[q] % priv->num_tx_ring[q];
+               entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+                                            NUM_TX_DESC);
                desc = &priv->tx_ring[q][entry];
                if (desc->die_dt != DT_FEMPTY)
                        break;
@@ -447,14 +442,18 @@ static int ravb_tx_free(struct net_device *ndev, int q)
                dma_rmb();
                size = le16_to_cpu(desc->ds_tagl) & TX_DS;
                /* Free the original skb. */
-               if (priv->tx_skb[q][entry]) {
+               if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
                        dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
                                         size, DMA_TO_DEVICE);
-                       dev_kfree_skb_any(priv->tx_skb[q][entry]);
-                       priv->tx_skb[q][entry] = NULL;
+                       /* Last packet descriptor? */
+                       if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+                               entry /= NUM_TX_DESC;
+                               dev_kfree_skb_any(priv->tx_skb[q][entry]);
+                               priv->tx_skb[q][entry] = NULL;
+                               stats->tx_packets++;
+                       }
                        free_num++;
                }
-               stats->tx_packets++;
                stats->tx_bytes += size;
                desc->die_dt = DT_EEMPTY;
        }
@@ -508,8 +507,8 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
        struct sk_buff *skb;
        dma_addr_t dma_addr;
        struct timespec64 ts;
-       u16 pkt_len = 0;
        u8  desc_status;
+       u16 pkt_len;
        int limit;
 
        boguscnt = min(boguscnt, *quota);
@@ -524,6 +523,10 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
                if (--boguscnt < 0)
                        break;
 
+               /* We use 0-byte descriptors to mark the DMA mapping errors */
+               if (!pkt_len)
+                       continue;
+
                if (desc_status & MSC_MC)
                        stats->multicast++;
 
@@ -543,10 +546,9 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 
                        skb = priv->rx_skb[q][entry];
                        priv->rx_skb[q][entry] = NULL;
-                       dma_sync_single_for_cpu(&ndev->dev,
-                                               le32_to_cpu(desc->dptr),
-                                               ALIGN(PKT_BUF_SZ, 16),
-                                               DMA_FROM_DEVICE);
+                       dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+                                        ALIGN(PKT_BUF_SZ, 16),
+                                        DMA_FROM_DEVICE);
                        get_ts &= (q == RAVB_NC) ?
                                        RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
                                        ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
@@ -584,17 +586,15 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
                        if (!skb)
                                break;  /* Better luck next round. */
                        ravb_set_buffer_align(skb);
-                       dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
-                                        ALIGN(PKT_BUF_SZ, 16),
-                                        DMA_FROM_DEVICE);
                        dma_addr = dma_map_single(&ndev->dev, skb->data,
                                                  le16_to_cpu(desc->ds_cc),
                                                  DMA_FROM_DEVICE);
                        skb_checksum_none_assert(skb);
-                       if (dma_mapping_error(&ndev->dev, dma_addr)) {
-                               dev_kfree_skb_any(skb);
-                               break;
-                       }
+                       /* We just set the data size to 0 for a failed mapping
+                        * which should prevent DMA from happening...
+                        */
+                       if (dma_mapping_error(&ndev->dev, dma_addr))
+                               desc->ds_cc = cpu_to_le16(0);
                        desc->dptr = cpu_to_le32(dma_addr);
                        priv->rx_skb[q][entry] = skb;
                }
@@ -1272,45 +1272,60 @@ static void ravb_tx_timeout_work(struct work_struct *work)
 static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
        struct ravb_private *priv = netdev_priv(ndev);
-       struct ravb_tstamp_skb *ts_skb = NULL;
        u16 q = skb_get_queue_mapping(skb);
+       struct ravb_tstamp_skb *ts_skb;
        struct ravb_tx_desc *desc;
        unsigned long flags;
        u32 dma_addr;
        void *buffer;
        u32 entry;
-       u32 tccr;
+       u32 len;
 
        spin_lock_irqsave(&priv->lock, flags);
-       if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
+       if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
+           NUM_TX_DESC) {
                netif_err(priv, tx_queued, ndev,
                          "still transmitting with the full ring!\n");
                netif_stop_subqueue(ndev, q);
                spin_unlock_irqrestore(&priv->lock, flags);
                return NETDEV_TX_BUSY;
        }
-       entry = priv->cur_tx[q] % priv->num_tx_ring[q];
-       priv->tx_skb[q][entry] = skb;
+       entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
+       priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
 
        if (skb_put_padto(skb, ETH_ZLEN))
                goto drop;
 
-       buffer = PTR_ALIGN(priv->tx_buffers[q][entry], RAVB_ALIGN);
-       memcpy(buffer, skb->data, skb->len);
-       desc = &priv->tx_ring[q][entry];
-       desc->ds_tagl = cpu_to_le16(skb->len);
-       dma_addr = dma_map_single(&ndev->dev, buffer, skb->len, DMA_TO_DEVICE);
+       buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
+                entry / NUM_TX_DESC * DPTR_ALIGN;
+       len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
+       memcpy(buffer, skb->data, len);
+       dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
        if (dma_mapping_error(&ndev->dev, dma_addr))
                goto drop;
+
+       desc = &priv->tx_ring[q][entry];
+       desc->ds_tagl = cpu_to_le16(len);
+       desc->dptr = cpu_to_le32(dma_addr);
+
+       buffer = skb->data + len;
+       len = skb->len - len;
+       dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
+       if (dma_mapping_error(&ndev->dev, dma_addr))
+               goto unmap;
+
+       desc++;
+       desc->ds_tagl = cpu_to_le16(len);
        desc->dptr = cpu_to_le32(dma_addr);
 
        /* TX timestamp required */
        if (q == RAVB_NC) {
                ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
                if (!ts_skb) {
-                       dma_unmap_single(&ndev->dev, dma_addr, skb->len,
+                       desc--;
+                       dma_unmap_single(&ndev->dev, dma_addr, len,
                                         DMA_TO_DEVICE);
-                       goto drop;
+                       goto unmap;
                }
                ts_skb->skb = skb;
                ts_skb->tag = priv->ts_skb_tag++;
@@ -1326,15 +1341,15 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
        /* Descriptor type must be set after all the above writes */
        dma_wmb();
-       desc->die_dt = DT_FSINGLE;
+       desc->die_dt = DT_FEND;
+       desc--;
+       desc->die_dt = DT_FSTART;
 
-       tccr = ravb_read(ndev, TCCR);
-       if (!(tccr & (TCCR_TSRQ0 << q)))
-               ravb_write(ndev, tccr | (TCCR_TSRQ0 << q), TCCR);
+       ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
 
-       priv->cur_tx[q]++;
-       if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
-           !ravb_tx_free(ndev, q))
+       priv->cur_tx[q] += NUM_TX_DESC;
+       if (priv->cur_tx[q] - priv->dirty_tx[q] >
+           (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
                netif_stop_subqueue(ndev, q);
 
 exit:
@@ -1342,9 +1357,12 @@ exit:
        spin_unlock_irqrestore(&priv->lock, flags);
        return NETDEV_TX_OK;
 
+unmap:
+       dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+                        le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
 drop:
        dev_kfree_skb_any(skb);
-       priv->tx_skb[q][entry] = NULL;
+       priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
        goto exit;
 }
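Completion ordering is handled by writing the descriptor types last: dma_wmb() makes the payload writes visible first, and DT_FEND is stored before DT_FSTART so the hardware can never observe a started frame without its end. Indexing also changes shape, since the descriptor ring is now per descriptor while the skb ring stays per packet, hence the repeated entry / NUM_TX_DESC conversions:

/* Illustrative names; this mirrors the entry arithmetic in the hunks
 * above mapping descriptor-ring slots to skb-ring slots.
 */
desc_entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
skb_entry  = desc_entry / NUM_TX_DESC;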
 
index 2d8578cade03790782af7e97a1f59f9848301ae6..4cd5a71ad45e577d8cc4fb68c6b0c6d3d20e9b28 100644 (file)
@@ -202,6 +202,7 @@ enum {
        ROCKER_CTRL_IPV4_MCAST,
        ROCKER_CTRL_IPV6_MCAST,
        ROCKER_CTRL_DFLT_BRIDGING,
+       ROCKER_CTRL_DFLT_OVS,
        ROCKER_CTRL_MAX,
 };
 
@@ -321,9 +322,21 @@ static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
        return ntohs(vlan_id);
 }
 
+static bool rocker_port_is_slave(const struct rocker_port *rocker_port,
+                                  const char *kind)
+{
+       return rocker_port->bridge_dev &&
+               !strcmp(rocker_port->bridge_dev->rtnl_link_ops->kind, kind);
+}
+
 static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
 {
-       return !!rocker_port->bridge_dev;
+       return rocker_port_is_slave(rocker_port, "bridge");
+}
+
+static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
+{
+       return rocker_port_is_slave(rocker_port, "openvswitch");
 }
 
 #define ROCKER_OP_FLAG_REMOVE          BIT(0)
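rocker_port_is_slave() keys off the master device's rtnl_link_ops->kind string, which each link type registers with the rtnl link machinery ("bridge" for the bridge module, "openvswitch" for OVS internal ports), letting bridged and OVS-enslaved ports share one classification path; the same strings drive rocker_port_master_changed() later in this patch. The helper generalizes to any registered master kind, e.g. a hypothetical check for team devices:

/* Hypothetical extension of the helper above; "team" is the kind
 * string the team driver registers. Rocker itself only handles
 * "bridge" and "openvswitch".
 */
static bool rocker_port_is_teamed(const struct rocker_port *rocker_port)
{
        return rocker_port_is_slave(rocker_port, "team");
}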
@@ -1817,6 +1830,30 @@ rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
        return 0;
 }
 
+static int
+rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
+                                     struct rocker_desc_info *desc_info,
+                                     void *priv)
+{
+       int mtu = *(int *)priv;
+       struct rocker_tlv *cmd_info;
+
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+                              ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
+               return -EMSGSIZE;
+       cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+       if (!cmd_info)
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
+                              rocker_port->pport))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
+                              mtu))
+               return -EMSGSIZE;
+       rocker_tlv_nest_end(desc_info, cmd_info);
+       return 0;
+}
+
 static int
 rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
                                  struct rocker_desc_info *desc_info,
@@ -1874,6 +1911,14 @@ static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
                               macaddr, NULL, NULL);
 }
 
+static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
+                                           int mtu)
+{
+       return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+                              rocker_cmd_set_port_settings_mtu_prep,
+                              &mtu, NULL, NULL);
+}
+
 static int rocker_port_set_learning(struct rocker_port *rocker_port,
                                    enum switchdev_trans trans)
 {
@@ -3243,6 +3288,12 @@ static struct rocker_ctrl {
                .bridge = true,
                .copy_to_cpu = true,
        },
+       [ROCKER_CTRL_DFLT_OVS] = {
+               /* pass all pkts up to CPU */
+               .eth_dst = zero_mac,
+               .eth_dst_mask = zero_mac,
+               .acl = true,
+       },
 };
 
 static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
@@ -3755,11 +3806,14 @@ static int rocker_port_stp_update(struct rocker_port *rocker_port,
                break;
        case BR_STATE_LEARNING:
        case BR_STATE_FORWARDING:
-               want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
+               if (!rocker_port_is_ovsed(rocker_port))
+                       want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
                want[ROCKER_CTRL_IPV4_MCAST] = true;
                want[ROCKER_CTRL_IPV6_MCAST] = true;
                if (rocker_port_is_bridged(rocker_port))
                        want[ROCKER_CTRL_DFLT_BRIDGING] = true;
+               else if (rocker_port_is_ovsed(rocker_port))
+                       want[ROCKER_CTRL_DFLT_OVS] = true;
                else
                        want[ROCKER_CTRL_LOCAL_ARP] = true;
                break;
@@ -3983,7 +4037,8 @@ static int rocker_port_open(struct net_device *dev)
 
        napi_enable(&rocker_port->napi_tx);
        napi_enable(&rocker_port->napi_rx);
-       rocker_port_set_enable(rocker_port, true);
+       if (!dev->proto_down)
+               rocker_port_set_enable(rocker_port, true);
        netif_start_queue(dev);
        return 0;
 
@@ -4102,8 +4157,11 @@ static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
                                          skb->data, skb_headlen(skb));
        if (err)
                goto nest_cancel;
-       if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX)
-               goto nest_cancel;
+       if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
+               err = skb_linearize(skb);
+               if (err)
+                       goto unmap_frags;
+       }
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -4152,6 +4210,34 @@ static int rocker_port_set_mac_address(struct net_device *dev, void *p)
        return 0;
 }
 
+static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+       int running = netif_running(dev);
+       int err;
+
+#define ROCKER_PORT_MIN_MTU    68
+#define ROCKER_PORT_MAX_MTU    9000
+
+       if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
+               return -EINVAL;
+
+       if (running)
+               rocker_port_stop(dev);
+
+       netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
+       dev->mtu = new_mtu;
+
+       err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
+       if (err)
+               return err;
+
+       if (running)
+               err = rocker_port_open(dev);
+
+       return err;
+}
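One quirk worth noting in rocker_port_change_mtu(): dev->mtu is updated and the port stopped before the hardware command is issued, so if rocker_cmd_set_port_settings_mtu() fails, the function returns with the port down and the new MTU already recorded. A more defensive ordering (a sketch, not the committed code) would restore the old value and reopen; from user space the path is exercised by e.g. "ip link set <port> mtu 9000".

/* Sketch of a more defensive error path for the MTU change above;
 * old_mtu is assumed saved before dev->mtu is overwritten.
 */
err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
if (err) {
        dev->mtu = old_mtu;
        if (running)
                rocker_port_open(dev);
        return err;
}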
+
 static int rocker_port_get_phys_port_name(struct net_device *dev,
                                          char *buf, size_t len)
 {
@@ -4167,11 +4253,23 @@ static int rocker_port_get_phys_port_name(struct net_device *dev,
        return err ? -EOPNOTSUPP : 0;
 }
 
+static int rocker_port_change_proto_down(struct net_device *dev,
+                                        bool proto_down)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+
+       if (rocker_port->dev->flags & IFF_UP)
+               rocker_port_set_enable(rocker_port, !proto_down);
+       rocker_port->dev->proto_down = proto_down;
+       return 0;
+}
+
 static const struct net_device_ops rocker_port_netdev_ops = {
        .ndo_open                       = rocker_port_open,
        .ndo_stop                       = rocker_port_stop,
        .ndo_start_xmit                 = rocker_port_xmit,
        .ndo_set_mac_address            = rocker_port_set_mac_address,
+       .ndo_change_mtu                 = rocker_port_change_mtu,
        .ndo_bridge_getlink             = switchdev_port_bridge_getlink,
        .ndo_bridge_setlink             = switchdev_port_bridge_setlink,
        .ndo_bridge_dellink             = switchdev_port_bridge_dellink,
@@ -4179,6 +4277,7 @@ static const struct net_device_ops rocker_port_netdev_ops = {
        .ndo_fdb_del                    = switchdev_port_fdb_del,
        .ndo_fdb_dump                   = switchdev_port_fdb_dump,
        .ndo_get_phys_port_name         = rocker_port_get_phys_port_name,
+       .ndo_change_proto_down          = rocker_port_change_proto_down,
 };
 
 /********************
@@ -4726,6 +4825,7 @@ static int rocker_port_rx_proc(const struct rocker *rocker,
        const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
        struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
        size_t rx_len;
+       u16 rx_flags = 0;
 
        if (!skb)
                return -ENOENT;
@@ -4733,6 +4833,8 @@ static int rocker_port_rx_proc(const struct rocker *rocker,
        rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
        if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
                return -EINVAL;
+       if (attrs[ROCKER_TLV_RX_FLAGS])
+               rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);
 
        rocker_dma_rx_ring_skb_unmap(rocker, attrs);
 
@@ -4740,6 +4842,9 @@ static int rocker_port_rx_proc(const struct rocker *rocker,
        skb_put(skb, rx_len);
        skb->protocol = eth_type_trans(skb, rocker_port->dev);
 
+       if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
+               skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
+
        rocker_port->dev->stats.rx_packets++;
        rocker_port->dev->stats.rx_bytes += skb->len;
 
@@ -4868,7 +4973,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
                       NAPI_POLL_WEIGHT);
        rocker_carrier_init(rocker_port);
 
-       dev->features |= NETIF_F_NETNS_LOCAL;
+       dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
 
        err = register_netdev(dev);
        if (err) {
@@ -4877,6 +4982,8 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
        }
        rocker->ports[port_number] = rocker_port;
 
+       switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);
+
        rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE);
 
        err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0);
@@ -5156,6 +5263,7 @@ static int rocker_port_bridge_join(struct rocker_port *rocker_port,
                rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
 
        rocker_port->bridge_dev = bridge;
+       switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);
 
        return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
                                    untagged_vid, 0);
@@ -5176,6 +5284,8 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
                rocker_port_internal_vlan_id_get(rocker_port,
                                                 rocker_port->dev->ifindex);
 
+       switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
+                                   false);
        rocker_port->bridge_dev = NULL;
 
        err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
@@ -5190,23 +5300,39 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
        return err;
 }
 
+
+static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
+                                  struct net_device *master)
+{
+       int err;
+
+       rocker_port->bridge_dev = master;
+
+       err = rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
+       if (err)
+               return err;
+       err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
+
+       return err;
+}
+
 static int rocker_port_master_changed(struct net_device *dev)
 {
        struct rocker_port *rocker_port = netdev_priv(dev);
        struct net_device *master = netdev_master_upper_dev_get(dev);
        int err = 0;
 
-       /* There are currently three cases handled here:
-        * 1. Joining a bridge
-        * 2. Leaving a previously joined bridge
-        * 3. Other, e.g. being added to or removed from a bond or openvswitch,
-        *    in which case nothing is done
-        */
-       if (master && master->rtnl_link_ops &&
-           !strcmp(master->rtnl_link_ops->kind, "bridge"))
-               err = rocker_port_bridge_join(rocker_port, master);
-       else if (rocker_port_is_bridged(rocker_port))
+       /* N.B: Do nothing if the type of master is not supported */
+       if (master && master->rtnl_link_ops) {
+               if (!strcmp(master->rtnl_link_ops->kind, "bridge"))
+                       err = rocker_port_bridge_join(rocker_port, master);
+               else if (!strcmp(master->rtnl_link_ops->kind, "openvswitch"))
+                       err = rocker_port_ovs_changed(rocker_port, master);
+       } else if (rocker_port_is_bridged(rocker_port)) {
                err = rocker_port_bridge_leave(rocker_port);
+       } else if (rocker_port_is_ovsed(rocker_port)) {
+               err = rocker_port_ovs_changed(rocker_port, NULL);
+       }
 
        return err;
 }
index c61fbf968036a3fe4a57f8afbef704bcffa37dc7..12490b2f65040e18a49a4782387736da411c19e5 100644 (file)
@@ -159,6 +159,7 @@ enum {
        ROCKER_TLV_CMD_PORT_SETTINGS_MODE,              /* u8 */
        ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,          /* u8 */
        ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME,         /* binary */
+       ROCKER_TLV_CMD_PORT_SETTINGS_MTU,               /* u16 */
 
        __ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
        ROCKER_TLV_CMD_PORT_SETTINGS_MAX =
@@ -245,6 +246,7 @@ enum {
 #define ROCKER_RX_FLAGS_TCP                    BIT(5)
 #define ROCKER_RX_FLAGS_UDP                    BIT(6)
 #define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD      BIT(7)
+#define ROCKER_RX_FLAGS_FWD_OFFLOAD            BIT(8)
 
 enum {
        ROCKER_TLV_TX_UNSPEC,
index 605cc8948594626783e093db71d474d638a26bc1..06b8061f1b42d7cd76ad51a63ff8969810cfb8c1 100644 (file)
@@ -49,6 +49,12 @@ enum {
  */
 #define HUNT_FILTER_TBL_ROWS 8192
 
+#define EFX_EF10_FILTER_ID_INVALID 0xffff
+struct efx_ef10_dev_addr {
+       u8 addr[ETH_ALEN];
+       u16 id;
+};
+
 struct efx_ef10_filter_table {
 /* The RX match field masks supported by this fw & hw, in order of priority */
        enum efx_filter_match_flags rx_match_flags[
@@ -69,13 +75,14 @@ struct efx_ef10_filter_table {
 /* Shadow of net_device address lists, guarded by mac_lock */
 #define EFX_EF10_FILTER_DEV_UC_MAX     32
 #define EFX_EF10_FILTER_DEV_MC_MAX     256
-       struct {
-               u8 addr[ETH_ALEN];
-               u16 id;
-       } dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX],
-         dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
-       int dev_uc_count;               /* negative for PROMISC */
-       int dev_mc_count;               /* negative for PROMISC/ALLMULTI */
+       struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
+       struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
+       int dev_uc_count;
+       int dev_mc_count;
+/* Indices (like efx_ef10_dev_addr.id) for promisc/allmulti filters */
+       u16 ucdef_id;
+       u16 bcast_id;
+       u16 mcdef_id;
 };
 
 /* An arbitrary search limit for the software hash table */
@@ -387,7 +394,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
         * First try to enable it, then if we get EPERM, just
         * ask if it's already enabled
         */
-       rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
+       rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true, NULL);
        if (rc == 0) {
                nic_data->workaround_35388 = true;
        } else if (rc == -EPERM) {
@@ -984,12 +991,24 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
 static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
 {
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
+#ifdef CONFIG_SFC_SRIOV
+       unsigned int i;
+#endif
 
        /* All our allocations have been reset */
        nic_data->must_realloc_vis = true;
        nic_data->must_restore_filters = true;
        nic_data->must_restore_piobufs = true;
        nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+
+       /* Driver-created vswitches and vports must be re-created */
+       nic_data->must_probe_vswitching = true;
+       nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+#ifdef CONFIG_SFC_SRIOV
+       if (nic_data->vf)
+               for (i = 0; i < efx->vf_count; i++)
+                       nic_data->vf[i].vport_id = 0;
+#endif
 }
 
 static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
@@ -1034,6 +1053,12 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
 {
        int rc = efx_mcdi_reset(efx, reset_type);
 
+       /* Unprivileged functions return -EPERM, but need to return success
+        * here so that the datapath is brought back up.
+        */
+       if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)
+               rc = 0;
+
        /* If it was a port reset, trigger reallocation of MC resources.
         * Note that on an MC reset nothing needs to be done now because we'll
         * detect the MC reset later and handle it then.
@@ -1558,10 +1583,6 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
        /* All our allocations have been reset */
        efx_ef10_reset_mc_allocations(efx);
 
-       /* Driver-created vswitches and vports must be re-created */
-       nic_data->must_probe_vswitching = true;
-       nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
-
        /* The datapath firmware might have been changed */
        nic_data->must_check_datapath_caps = true;
 
@@ -2197,6 +2218,29 @@ static int efx_ef10_ev_probe(struct efx_channel *channel)
                                    GFP_KERNEL);
 }
 
+static void efx_ef10_ev_fini(struct efx_channel *channel)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
+       MCDI_DECLARE_BUF_ERR(outbuf);
+       struct efx_nic *efx = channel->efx;
+       size_t outlen;
+       int rc;
+
+       MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
+
+       rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
+                         outbuf, sizeof(outbuf), &outlen);
+
+       if (rc && rc != -EALREADY)
+               goto fail;
+
+       return;
+
+fail:
+       efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
+                              outbuf, outlen, rc);
+}
+
 static int efx_ef10_ev_init(struct efx_channel *channel)
 {
        MCDI_DECLARE_BUF(inbuf,
@@ -2208,6 +2252,7 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
        struct efx_ef10_nic_data *nic_data;
        bool supports_rx_merge;
        size_t inlen, outlen;
+       unsigned int enabled, implemented;
        dma_addr_t dma_addr;
        int rc;
        int i;
@@ -2248,30 +2293,52 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
        rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
                          outbuf, sizeof(outbuf), &outlen);
        /* IRQ return is ignored */
-       return rc;
-}
-
-static void efx_ef10_ev_fini(struct efx_channel *channel)
-{
-       MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
-       MCDI_DECLARE_BUF_ERR(outbuf);
-       struct efx_nic *efx = channel->efx;
-       size_t outlen;
-       int rc;
-
-       MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
-
-       rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
-                         outbuf, sizeof(outbuf), &outlen);
+       if (channel->channel || rc)
+               return rc;
 
-       if (rc && rc != -EALREADY)
+       /* Successfully created event queue on channel 0 */
+       rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
+       if (rc == -ENOSYS) {
+               /* GET_WORKAROUNDS was implemented before the bug26807
+                * workaround, thus the latter must be unavailable in this fw
+                */
+               nic_data->workaround_26807 = false;
+               rc = 0;
+       } else if (rc) {
                goto fail;
+       } else {
+               nic_data->workaround_26807 =
+                       !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
+
+               if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 &&
+                   !nic_data->workaround_26807) {
+                       unsigned int flags;
+
+                       rc = efx_mcdi_set_workaround(efx,
+                                                    MC_CMD_WORKAROUND_BUG26807,
+                                                    true, &flags);
+
+                       if (!rc) {
+                               if (flags &
+                                   1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
+                                       netif_info(efx, drv, efx->net_dev,
+                                                  "other functions on NIC have been reset\n");
+                                       /* MC's boot count has incremented */
+                                       ++nic_data->warm_boot_count;
+                               }
+                               nic_data->workaround_26807 = true;
+                       } else if (rc == -EPERM) {
+                               rc = 0;
+                       }
+               }
+       }
 
-       return;
+       if (!rc)
+               return 0;
 
 fail:
-       efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
-                              outbuf, outlen, rc);
+       efx_ef10_ev_fini(channel);
+       return rc;
 }
 
 static void efx_ef10_ev_remove(struct efx_channel *channel)
@@ -3225,6 +3292,19 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
                                               filter_id, false);
 }
 
+static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id)
+{
+       return filter_id % HUNT_FILTER_TBL_ROWS;
+}
+
+static int efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
+                                        enum efx_filter_priority priority,
+                                        u32 filter_id)
+{
+       return efx_ef10_filter_remove_internal(efx, 1U << priority,
+                                              filter_id, true);
+}
+
 static int efx_ef10_filter_get_safe(struct efx_nic *efx,
                                    enum efx_filter_priority priority,
                                    u32 filter_id, struct efx_filter_spec *spec)
@@ -3598,6 +3678,10 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
                goto fail;
        }
 
+       table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
+       table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
+       table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+
        efx->filter_state = table;
        init_waitqueue_head(&table->waitq);
        return 0;
@@ -3700,145 +3784,233 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
        kfree(table);
 }
 
-/* Caller must hold efx->filter_sem for read if race against
- * efx_ef10_filter_table_remove() is possible
- */
-static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+#define EFX_EF10_FILTER_DO_MARK_OLD(id) \
+               if (id != EFX_EF10_FILTER_ID_INVALID) { \
+                       filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \
+                       WARN_ON(!table->entry[filter_idx].spec); \
+                       table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; \
+               }
+static void efx_ef10_filter_mark_old(struct efx_nic *efx)
 {
        struct efx_ef10_filter_table *table = efx->filter_state;
-       struct net_device *net_dev = efx->net_dev;
-       struct efx_filter_spec spec;
-       bool remove_failed = false;
-       struct netdev_hw_addr *uc;
-       struct netdev_hw_addr *mc;
-       unsigned int filter_idx;
-       int i, n, rc;
-
-       if (!efx_dev_registered(efx))
-               return;
+       unsigned int filter_idx, i;
 
        if (!table)
                return;
 
        /* Mark old filters that may need to be removed */
        spin_lock_bh(&efx->filter_lock);
-       n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
-       for (i = 0; i < n; i++) {
-               filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
-               table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
-       }
-       n = table->dev_mc_count < 0 ? 1 : table->dev_mc_count;
-       for (i = 0; i < n; i++) {
-               filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
-               table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
-       }
+       for (i = 0; i < table->dev_uc_count; i++)
+               EFX_EF10_FILTER_DO_MARK_OLD(table->dev_uc_list[i].id);
+       for (i = 0; i < table->dev_mc_count; i++)
+               EFX_EF10_FILTER_DO_MARK_OLD(table->dev_mc_list[i].id);
+       EFX_EF10_FILTER_DO_MARK_OLD(table->ucdef_id);
+       EFX_EF10_FILTER_DO_MARK_OLD(table->bcast_id);
+       EFX_EF10_FILTER_DO_MARK_OLD(table->mcdef_id);
        spin_unlock_bh(&efx->filter_lock);
+}
+#undef EFX_EF10_FILTER_DO_MARK_OLD
 
-       /* Copy/convert the address lists; add the primary station
-        * address and broadcast address
-        */
-       netif_addr_lock_bh(net_dev);
-       if (net_dev->flags & IFF_PROMISC ||
-           netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) {
-               table->dev_uc_count = -1;
-       } else {
-               table->dev_uc_count = 1 + netdev_uc_count(net_dev);
-               ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
-               i = 1;
-               netdev_for_each_uc_addr(uc, net_dev) {
-                       ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
-                       i++;
+static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc)
+{
+       struct efx_ef10_filter_table *table = efx->filter_state;
+       struct net_device *net_dev = efx->net_dev;
+       struct netdev_hw_addr *uc;
+       int addr_count;
+       unsigned int i;
+
+       table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
+       addr_count = netdev_uc_count(net_dev);
+       if (net_dev->flags & IFF_PROMISC)
+               *promisc = true;
+       table->dev_uc_count = 1 + addr_count;
+       ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
+       i = 1;
+       netdev_for_each_uc_addr(uc, net_dev) {
+               if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
+                       *promisc = true;
+                       break;
                }
+               ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
+               table->dev_uc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
+               i++;
        }
-       if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
-           netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) {
-               table->dev_mc_count = -1;
-       } else {
-               table->dev_mc_count = 1 + netdev_mc_count(net_dev);
-               eth_broadcast_addr(table->dev_mc_list[0].addr);
-               i = 1;
-               netdev_for_each_mc_addr(mc, net_dev) {
-                       ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
-                       i++;
+}
+
+static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc)
+{
+       struct efx_ef10_filter_table *table = efx->filter_state;
+       struct net_device *net_dev = efx->net_dev;
+       struct netdev_hw_addr *mc;
+       unsigned int i, addr_count;
+
+       table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+       table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
+       if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
+               *promisc = true;
+
+       addr_count = netdev_mc_count(net_dev);
+       i = 0;
+       netdev_for_each_mc_addr(mc, net_dev) {
+               if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
+                       *promisc = true;
+                       break;
                }
+               ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
+               table->dev_mc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
+               i++;
        }
-       netif_addr_unlock_bh(net_dev);
 
-       /* Insert/renew unicast filters */
-       if (table->dev_uc_count >= 0) {
-               for (i = 0; i < table->dev_uc_count; i++) {
-                       efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
-                                          EFX_FILTER_FLAG_RX_RSS,
-                                          0);
-                       efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
-                                                table->dev_uc_list[i].addr);
-                       rc = efx_ef10_filter_insert(efx, &spec, true);
-                       if (rc < 0) {
-                               /* Fall back to unicast-promisc */
-                               while (i--)
-                                       efx_ef10_filter_remove_safe(
+       table->dev_mc_count = i;
+}
+
+static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
+                                            bool multicast, bool rollback)
+{
+       struct efx_ef10_filter_table *table = efx->filter_state;
+       struct efx_ef10_dev_addr *addr_list;
+       struct efx_filter_spec spec;
+       u8 baddr[ETH_ALEN];
+       unsigned int i, j;
+       int addr_count;
+       int rc;
+
+       if (multicast) {
+               addr_list = table->dev_mc_list;
+               addr_count = table->dev_mc_count;
+       } else {
+               addr_list = table->dev_uc_list;
+               addr_count = table->dev_uc_count;
+       }
+
+       /* Insert/renew filters */
+       for (i = 0; i < addr_count; i++) {
+               efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+                                  EFX_FILTER_FLAG_RX_RSS,
+                                  0);
+               efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
+                                        addr_list[i].addr);
+               rc = efx_ef10_filter_insert(efx, &spec, true);
+               if (rc < 0) {
+                       if (rollback) {
+                               netif_info(efx, drv, efx->net_dev,
+                                          "efx_ef10_filter_insert failed rc=%d\n",
+                                          rc);
+                               /* Fall back to promiscuous */
+                               for (j = 0; j < i; j++) {
+                                       if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
+                                               continue;
+                                       efx_ef10_filter_remove_unsafe(
                                                efx, EFX_FILTER_PRI_AUTO,
-                                               table->dev_uc_list[i].id);
-                               table->dev_uc_count = -1;
-                               break;
+                                               addr_list[j].id);
+                                       addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
+                               }
+                               return rc;
+                       } else {
+                               /* mark as not inserted, and carry on */
+                               rc = EFX_EF10_FILTER_ID_INVALID;
                        }
-                       table->dev_uc_list[i].id = rc;
                }
+               addr_list[i].id = efx_ef10_filter_get_unsafe_id(efx, rc);
        }
-       if (table->dev_uc_count < 0) {
+
+       if (multicast && rollback) {
+               /* Also need an Ethernet broadcast filter */
                efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
                                   EFX_FILTER_FLAG_RX_RSS,
                                   0);
-               efx_filter_set_uc_def(&spec);
+               eth_broadcast_addr(baddr);
+               efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr);
                rc = efx_ef10_filter_insert(efx, &spec, true);
                if (rc < 0) {
-                       WARN_ON(1);
-                       table->dev_uc_count = 0;
+                       netif_warn(efx, drv, efx->net_dev,
+                                  "Broadcast filter insert failed rc=%d\n", rc);
+                       /* Fall back to promiscuous */
+                       for (j = 0; j < i; j++) {
+                               if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
+                                       continue;
+                               efx_ef10_filter_remove_unsafe(
+                                       efx, EFX_FILTER_PRI_AUTO,
+                                       addr_list[j].id);
+                               addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
+                       }
+                       return rc;
                } else {
-                       table->dev_uc_list[0].id = rc;
+                       table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
                }
        }
 
-       /* Insert/renew multicast filters */
-       if (table->dev_mc_count >= 0) {
-               for (i = 0; i < table->dev_mc_count; i++) {
+       return 0;
+}
+
+static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
+                                     bool rollback)
+{
+       struct efx_ef10_filter_table *table = efx->filter_state;
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       struct efx_filter_spec spec;
+       u8 baddr[ETH_ALEN];
+       int rc;
+
+       efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+                          EFX_FILTER_FLAG_RX_RSS,
+                          0);
+
+       if (multicast)
+               efx_filter_set_mc_def(&spec);
+       else
+               efx_filter_set_uc_def(&spec);
+
+       rc = efx_ef10_filter_insert(efx, &spec, true);
+       if (rc < 0) {
+               netif_warn(efx, drv, efx->net_dev,
+                          "%scast mismatch filter insert failed rc=%d\n",
+                          multicast ? "Multi" : "Uni", rc);
+       } else if (multicast) {
+               table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc);
+               if (!nic_data->workaround_26807) {
+                       /* Also need an Ethernet broadcast filter */
                        efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
                                           EFX_FILTER_FLAG_RX_RSS,
                                           0);
+                       eth_broadcast_addr(baddr);
                        efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
-                                                table->dev_mc_list[i].addr);
+                                                baddr);
                        rc = efx_ef10_filter_insert(efx, &spec, true);
                        if (rc < 0) {
-                               /* Fall back to multicast-promisc */
-                               while (i--)
-                                       efx_ef10_filter_remove_safe(
-                                               efx, EFX_FILTER_PRI_AUTO,
-                                               table->dev_mc_list[i].id);
-                               table->dev_mc_count = -1;
-                               break;
+                               netif_warn(efx, drv, efx->net_dev,
+                                          "Broadcast filter insert failed rc=%d\n",
+                                          rc);
+                               if (rollback) {
+                                       /* Roll back the mc_def filter */
+                                       efx_ef10_filter_remove_unsafe(
+                                                       efx, EFX_FILTER_PRI_AUTO,
+                                                       table->mcdef_id);
+                                       table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+                                       return rc;
+                               }
+                       } else {
+                               table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
                        }
-                       table->dev_mc_list[i].id = rc;
-               }
-       }
-       if (table->dev_mc_count < 0) {
-               efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
-                                  EFX_FILTER_FLAG_RX_RSS,
-                                  0);
-               efx_filter_set_mc_def(&spec);
-               rc = efx_ef10_filter_insert(efx, &spec, true);
-               if (rc < 0) {
-                       WARN_ON(1);
-                       table->dev_mc_count = 0;
-               } else {
-                       table->dev_mc_list[0].id = rc;
                }
+               rc = 0;
+       } else {
+               table->ucdef_id = rc;
+               rc = 0;
        }
+       return rc;
+}
+
+/* Remove filters that weren't renewed.  Since nothing else changes the AUTO_OLD
+ * flag or removes these filters, we don't need to hold the filter_lock while
+ * scanning for these filters.
+ */
+static void efx_ef10_filter_remove_old(struct efx_nic *efx)
+{
+       struct efx_ef10_filter_table *table = efx->filter_state;
+       bool remove_failed = false;
+       int i;
 
-       /* Remove filters that weren't renewed.  Since nothing else
-        * changes the AUTO_OLD flag or removes these filters, we
-        * don't need to hold the filter_lock while scanning for
-        * these filters.
-        */
        for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
                if (ACCESS_ONCE(table->entry[i].spec) &
                    EFX_EF10_FILTER_FLAG_AUTO_OLD) {
@@ -3917,6 +4089,87 @@ reset_nic:
        return rc ? rc : rc2;
 }
 
+/* Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
+static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+{
+       struct efx_ef10_filter_table *table = efx->filter_state;
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       struct net_device *net_dev = efx->net_dev;
+       bool uc_promisc = false, mc_promisc = false;
+
+       if (!efx_dev_registered(efx))
+               return;
+
+       if (!table)
+               return;
+
+       efx_ef10_filter_mark_old(efx);
+
+       /* Copy/convert the address lists; add the primary station
+        * address and broadcast address
+        */
+       netif_addr_lock_bh(net_dev);
+       efx_ef10_filter_uc_addr_list(efx, &uc_promisc);
+       efx_ef10_filter_mc_addr_list(efx, &mc_promisc);
+       netif_addr_unlock_bh(net_dev);
+
+       /* Insert/renew unicast filters */
+       if (uc_promisc) {
+               efx_ef10_filter_insert_def(efx, false, false);
+               efx_ef10_filter_insert_addr_list(efx, false, false);
+       } else {
+               /* If any of the filters failed to insert, fall back to
+                * promiscuous mode - add in the uc_def filter.  But keep
+                * our individual unicast filters.
+                */
+               if (efx_ef10_filter_insert_addr_list(efx, false, false))
+                       efx_ef10_filter_insert_def(efx, false, false);
+       }
+
+       /* Insert/renew multicast filters */
+       /* If changing promiscuous state with cascaded multicast filters, remove
+        * old filters first, so that packets are dropped rather than duplicated
+        */
+       if (nic_data->workaround_26807 && efx->mc_promisc != mc_promisc)
+               efx_ef10_filter_remove_old(efx);
+       if (mc_promisc) {
+               if (nic_data->workaround_26807) {
+                       /* If we failed to insert promiscuous filters, rollback
+                        * and fall back to individual multicast filters
+                        */
+                       if (efx_ef10_filter_insert_def(efx, true, true)) {
+                               /* Changing promisc state, so remove old filters */
+                               efx_ef10_filter_remove_old(efx);
+                               efx_ef10_filter_insert_addr_list(efx, true, false);
+                       }
+               } else {
+                       /* If we failed to insert promiscuous filters, don't
+                        * rollback.  Regardless, also insert the mc_list
+                        */
+                       efx_ef10_filter_insert_def(efx, true, false);
+                       efx_ef10_filter_insert_addr_list(efx, true, false);
+               }
+       } else {
+               /* If any filters failed to insert, rollback and fall back to
+                * promiscuous mode - mc_def filter and maybe broadcast.  If
+                * that fails, roll back again and insert as many of our
+                * individual multicast filters as we can.
+                */
+               if (efx_ef10_filter_insert_addr_list(efx, true, true)) {
+                       /* Changing promisc state, so remove old filters */
+                       if (nic_data->workaround_26807)
+                               efx_ef10_filter_remove_old(efx);
+                       if (efx_ef10_filter_insert_def(efx, true, true))
+                               efx_ef10_filter_insert_addr_list(efx, true, false);
+               }
+       }
+
+       efx_ef10_filter_remove_old(efx);
+       efx->mc_promisc = mc_promisc;
+}
+
 static int efx_ef10_set_mac_address(struct efx_nic *efx)
 {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
@@ -4085,6 +4338,8 @@ efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
        rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
 
 out:
+       if (rc == -EPERM)
+               rc = 0;
        rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
        return rc ? rc : rc2;
 }
index 81640f8bb811b099f6b2afb55cbb55f004c1096e..98d172b04f71815a1105b304ea5cadfa552687a2 100644
@@ -1779,15 +1779,31 @@ int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
        return rc;
 }
 
-int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
+                           unsigned int *flags)
 {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN);
+       size_t outlen;
+       int rc;
 
        BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
        MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
        MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
-       return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
-                           NULL, 0, NULL);
+       rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
+                         outbuf, sizeof(outbuf), &outlen);
+       if (rc)
+               return rc;
+
+       if (!flags)
+               return 0;
+
+       if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
+               *flags = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS);
+       else
+               *flags = 0;
+
+       return 0;
 }
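
Callers with no use for the extended response can pass NULL as the new
flags argument; the early return above makes that safe. A minimal sketch
(illustrative, not from this patch), reusing the pre-existing BUG17230
workaround type:

	/* Sketch: enable a workaround, ignoring any EXT_OUT flags. */
	rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG17230,
				     true, NULL);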
 
 int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
@@ -1816,7 +1832,11 @@ int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
        return 0;
 
 fail:
-       netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+       /* Older firmware lacks GET_WORKAROUNDS and this isn't especially
+        * terrifying.  The call site will have to deal with it though.
+        */
+       netif_printk(efx, hw, rc == -ENOSYS ? KERN_DEBUG : KERN_ERR,
+                    efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
        return rc;
 }
 
index 1838afe2da920c59f7ca43744c7a27a0f4e62a6c..025d504c472b5e4ff0bfc516162e69926f65ede6 100644
@@ -346,7 +346,8 @@ void efx_mcdi_mac_pull_stats(struct efx_nic *efx);
 bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
 enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
 int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
-int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
+                           unsigned int *flags);
 int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
                             unsigned int *enabled_out);
 
index 45fca9fc66b7c9b2512abef1d3363dfbf1ab5344..4cc772164a79be013b7a2a6f66717f6b65c64ee3 100644
  * Unlike a warm boot, assume DMEM has been reloaded, so that
  * the MC persistent data must be reinitialised. */
 #define MC_FW_TEPID_BOOT_OK (16)
+/* We have entered the main firmware via recovery mode.  This
+ * means that MC persistent data must be reinitialised, but that
+ * we shouldn't touch PCIe config. */
+#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32)
 /* BIST state has been initialized */
 #define MC_FW_BIST_INIT_OK (128)
 
 #define MC_CMD_ERR_EINTR 4
 /* I/O failure */
 #define MC_CMD_ERR_EIO 5
+/* Already exists */
+#define MC_CMD_ERR_EEXIST 6
 /* Try again */
 #define MC_CMD_ERR_EAGAIN 11
 /* Out of memory */
 #define MC_CMD_ERR_ENODEV 19
 /* Invalid argument to target */
 #define MC_CMD_ERR_EINVAL 22
+/* Broken pipe */
+#define MC_CMD_ERR_EPIPE 32
+/* Read-only */
+#define MC_CMD_ERR_EROFS 30
 /* Out of range */
 #define MC_CMD_ERR_ERANGE 34
 /* Non-recursive resource is already acquired */
 #define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
 /* The datapath is disabled. */
 #define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+/* The requesting client is not a function */
+#define MC_CMD_ERR_CLIENT_NOT_FN  0x100c
+/* The requested operation might require the
+   command to be passed between MCs, and the
+   transport doesn't support that.  Should
+   only ever be seen over the UART. */
+#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
+/* VLAN tag(s) exists */
+#define MC_CMD_ERR_VLAN_EXIST 0x100e
+/* No MAC address assigned to an EVB port */
+#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
+/* Notifies the driver that the request has been relayed
+ * to an admin function for authorization. The driver should
+ * wait for a PROXY_RESPONSE event and then resend its request.
+ * This error code is followed by a 32-bit handle that
+ * helps matching it with the respective PROXY_RESPONSE event
+ * (see the sketch after this error-code list). */
+#define MC_CMD_ERR_PROXY_PENDING 0x1010
+#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
+/* The request cannot be passed for authorization because
+ * another request from the same function is currently being
+ * authorized. The driver should try again later. */
+#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
+/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
+ * that has enabled proxying or BLOCK_INDEX points to a function that
+ * doesn't await an authorization. */
+#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
+/* This code is currently only used internally in FW. Its meaning is that
+ * an operation failed due to lack of SR-IOV privilege.
+ * Normally it is translated to EPERM by send_cmd_err(),
+ * but it may also be used to trigger some special mechanism
+ * for handling such case, e.g. to relay the failed request
+ * to a designated admin function for authorization. */
+#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
+/* Workaround 26807 could not be turned on/off because some functions
+ * have already installed filters. See the comment at
+ * MC_CMD_WORKAROUND_BUG26807. */
+#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
 
 #define MC_CMD_ERR_CODE_OFST 0
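
For illustration of the PROXY_PENDING contract above: the 32-bit handle
sits immediately after the error code in the response, at
MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST. A minimal sketch, assuming a raw
little-endian error payload ('errbuf' is an illustrative name, not a
driver symbol):

/* Sketch: fetch the handle accompanying MC_CMD_ERR_PROXY_PENDING so it
 * can later be matched against a PROXY_RESPONSE event.
 */
static u32 example_proxy_pending_handle(const u8 *errbuf)
{
	return le32_to_cpup((const __le32 *)
			    (errbuf + MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST));
}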
 
         MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST +            \
         (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
 
+/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default
+ * stack ID (which must be in the range 1-255) along with an EVB port ID.
+ */
+#define EVB_STACK_ID(n)  (((n) & 0xff) << 16)
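
For example (a sketch, not from this patch; EVB_PORT_ID_ASSIGNED is the
driver's usual "own port" constant):

/* Sketch: address stack 3 on the function's assigned EVB port. */
u32 port_id = EVB_PORT_ID_ASSIGNED | EVB_STACK_ID(3);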
+
 
 /* Version 2 adds an optional argument to error returns: the errno value
  * may be followed by the (0-based) number of the first argument that
 #define          MCDI_EVENT_AOE_BYTEBLASTER 0x9
 /* enum: DDR ECC status update */
 #define          MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
+/* enum: PTP status update */
+#define          MCDI_EVENT_AOE_PTP_STATUS 0xb
 #define        MCDI_EVENT_AOE_ERR_DATA_LBN 8
 #define        MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
 #define        MCDI_EVENT_RX_ERR_RXQ_LBN 0
 #define        MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
 #define        MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
 #define        MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
+#define        MCDI_EVENT_MUM_ERR_TYPE_LBN 0
+#define        MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8
+/* enum: MUM failed to load - no valid image? */
+#define          MCDI_EVENT_MUM_NO_LOAD 0x1
+/* enum: MUM f/w reported an exception */
+#define          MCDI_EVENT_MUM_ASSERT 0x2
+/* enum: MUM not kicking watchdog */
+#define          MCDI_EVENT_MUM_WATCHDOG 0x3
+#define        MCDI_EVENT_MUM_ERR_DATA_LBN 8
+#define        MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
 #define       MCDI_EVENT_DATA_LBN 0
 #define       MCDI_EVENT_DATA_WIDTH 32
 #define       MCDI_EVENT_SRC_LBN 36
 #define       MCDI_EVENT_EV_CODE_WIDTH 4
 #define       MCDI_EVENT_CODE_LBN 44
 #define       MCDI_EVENT_CODE_WIDTH 8
+/* enum: Event generated by host software */
+#define          MCDI_EVENT_SW_EVENT 0x0
 /* enum: Bad assert. */
 #define          MCDI_EVENT_CODE_BADSSERT 0x1
 /* enum: PM Notice. */
 #define          MCDI_EVENT_CODE_MC_BIST 0x19
 /* enum: PTP tick event providing current NIC time */
 #define          MCDI_EVENT_CODE_PTP_TIME 0x1a
+/* enum: MUM fault */
+#define          MCDI_EVENT_CODE_MUM 0x1b
+/* enum: notify the designated PF of a new authorization request */
+#define          MCDI_EVENT_CODE_PROXY_REQUEST 0x1c
+/* enum: notify a function that awaits an authorization that its request has
+ * been processed and it may now resend the command
+ */
+#define          MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
 /* enum: Artificial event generated by host and posted via MC for test
  * purposes.
  */
 /* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC clock has ever been set
+ */
+#define       MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36
+#define       MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC and System clocks are in sync
+ */
+#define       MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37
+#define       MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of
+ * the minor value of the PTP clock
+ */
+#define       MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
+#define       MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+#define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
+#define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
+#define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
+#define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
+/* Zero means that the request has been completed or authorized, and the driver
+ * should resend it. A non-zero value means that the authorization has been
+ * denied, and gives the reason. Typically it will be EPERM.
+ */
+#define       MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
+#define       MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
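
Putting the PROXY event layouts together: the buffer index or handle
occupies the low 32 data bits, and for PROXY_RESPONSE the result code
sits in bits 36-43 of the raw event. A minimal sketch of decoding a
64-bit event word 'ev' by hand (the driver would normally use its
EFX_QWORD_FIELD-style accessors):

/* Sketch: decode a PROXY_RESPONSE event.  rc == 0 means the request
 * was authorized and should be resent; non-zero gives the denial
 * reason, typically EPERM.
 */
u32 handle = (u32)(ev & 0xffffffff);	/* HANDLE: bits 0-31 */
unsigned int rc = (unsigned int)((ev >> MCDI_EVENT_PROXY_RESPONSE_RC_LBN) &
				 ((1u << MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH) - 1));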
 
 /* FCDI_EVENT structuredef */
 #define    FCDI_EVENT_LEN 8
 #define          FCDI_EVENT_CODE_PTP_TICK 0x7
 /* enum: ECC error counters */
 #define          FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
+/* enum: Current status of PTP */
+#define          FCDI_EVENT_CODE_PTP_STATUS 0x9
+/* enum: Port id config to map MC-FC port idx */
+#define          FCDI_EVENT_CODE_PORT_CONFIG 0xa
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
 #define       FCDI_EVENT_LINK_STATE_DATA_OFST 0
 #define       FCDI_EVENT_LINK_STATE_DATA_LBN 0
 #define       FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define       FCDI_EVENT_PTP_STATE_OFST 0
+#define          FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
+#define          FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
+#define          FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
+#define       FCDI_EVENT_PTP_STATE_LBN 0
+#define       FCDI_EVENT_PTP_STATE_WIDTH 32
 #define       FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
 #define       FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+/* Index of MC port being referred to */
+#define       FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
+#define       FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
+/* FC Port index that matches the MC port index in SRC */
+#define       FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define       FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
+#define       FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
 
 /* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
 * to the MC. Note that this structure is overlaid over a normal FCDI event
 #define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
 #define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
 
+/* MUM_EVENT structuredef */
+#define    MUM_EVENT_LEN 8
+#define       MUM_EVENT_CONT_LBN 32
+#define       MUM_EVENT_CONT_WIDTH 1
+#define       MUM_EVENT_LEVEL_LBN 33
+#define       MUM_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define          MUM_EVENT_LEVEL_INFO  0x0
+/* enum: Warning. */
+#define          MUM_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define          MUM_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define          MUM_EVENT_LEVEL_FATAL 0x3
+#define       MUM_EVENT_DATA_OFST 0
+#define        MUM_EVENT_SENSOR_ID_LBN 0
+#define        MUM_EVENT_SENSOR_ID_WIDTH 8
+/*             Enum values, see field(s): */
+/*                MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define        MUM_EVENT_SENSOR_STATE_LBN 8
+#define        MUM_EVENT_SENSOR_STATE_WIDTH 8
+#define        MUM_EVENT_PORT_PHY_READY_LBN 0
+#define        MUM_EVENT_PORT_PHY_READY_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
+#define        MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
+#define        MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
+#define        MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
+#define        MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
+#define        MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
+#define        MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
+#define       MUM_EVENT_DATA_LBN 0
+#define       MUM_EVENT_DATA_WIDTH 32
+#define       MUM_EVENT_SRC_LBN 36
+#define       MUM_EVENT_SRC_WIDTH 8
+#define       MUM_EVENT_EV_CODE_LBN 60
+#define       MUM_EVENT_EV_CODE_WIDTH 4
+#define       MUM_EVENT_CODE_LBN 44
+#define       MUM_EVENT_CODE_WIDTH 8
+/* enum: The MUM was rebooted. */
+#define          MUM_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define          MUM_EVENT_CODE_ASSERT 0x2
+/* enum: Sensor failure. */
+#define          MUM_EVENT_CODE_SENSOR 0x3
+/* enum: Link fault has been asserted, or has cleared. */
+#define          MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
+#define       MUM_EVENT_SENSOR_DATA_OFST 0
+#define       MUM_EVENT_SENSOR_DATA_LBN 0
+#define       MUM_EVENT_SENSOR_DATA_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define       MUM_EVENT_PORT_PHY_FLAGS_LBN 0
+#define       MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define       MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
+#define       MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define       MUM_EVENT_PORT_PHY_CAPS_LBN 0
+#define       MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */
+#define       MUM_EVENT_PORT_PHY_TECH_LBN 0
+#define       MUM_EVENT_PORT_PHY_TECH_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36
+#define       MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */
+#define       MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40
+#define       MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4
+
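As with MCDI events, these MUM fields can be masked out of the raw
64-bit event word; a minimal sketch, with 'ev' as an illustrative u64:

/* Sketch: classify a MUM event. */
unsigned int level = (unsigned int)((ev >> MUM_EVENT_LEVEL_LBN) &
				    ((1u << MUM_EVENT_LEVEL_WIDTH) - 1));
unsigned int code = (unsigned int)((ev >> MUM_EVENT_CODE_LBN) &
				   ((1u << MUM_EVENT_CODE_WIDTH) - 1));
bool fatal = (code == MUM_EVENT_CODE_ASSERT ||
	      level == MUM_EVENT_LEVEL_FATAL);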
 
 /***********************************/
 /* MC_CMD_READ32
 
 /* MC_CMD_COPYCODE_IN msgrequest */
 #define    MC_CMD_COPYCODE_IN_LEN 16
-/* Source address */
-#define       MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
-/* enum: The main image should be entered via a copy of a single word from and
- * to this address when none of the other magic behaviours are required.
+/* Source address
+ *
+ * The main image should be entered via a copy of a single word from and to a
+ * magic address, which controls various aspects of the boot. The magic address
+ * is a bitfield, with each bit as documented below.
  */
+#define       MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
 #define          MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
-/* enum: Entering the main image via a copy of a single word from and to this
- * address indicates that it should not attempt to start the datapath CPUs.
- * This is useful for certain soft rebooting scenarios. (Huntington only)
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below)
  */
 #define          MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
-/* enum: Entering the main image via a copy of a single word from and to this
- * address indicates that it should not attempt to parse any configuration from
- * flash. (In addition, the datapath CPUs will not be started, as for
- * MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR above.) This is useful for
- * certain soft rebooting scenarios. (Huntington only)
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT,
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see
+ * below)
  */
 #define          MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
 /* Destination address */
 #define       MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
 #define       MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
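
A minimal sketch of composing the boot-magic source address from the
bitfield above (bit positions only; not taken from driver code). This
spells out the combination that the deprecated HUNT_IGNORE_CONFIG magic
address is documented as equivalent to:

/* Sketch: enter the main image, leave the satellite CPUs unloaded and
 * ignore flash config.
 */
u32 src_addr = (1u << MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN) |
	       (1u << MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN) |
	       (1u << MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN);
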
 #define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
 #define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
 #define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+#define          MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
 /* Failing thread address */
 #define       MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
 #define       MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
 
 /***********************************/
 /* MC_CMD_LOG_CTRL
- * Configure the output stream for various events and messages.
+ * Configure the output stream for log events such as link state changes,
+ * sensor notifications and MCDI completions
  */
 #define MC_CMD_LOG_CTRL 0x7
 
 #define          MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
 /* enum: Event queue. */
 #define          MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
+/* Legacy argument. Must be zero. */
 #define       MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
 
 /* MC_CMD_LOG_CTRL_OUT msgresponse */
  * input on the same NIC.
  */
 #define          MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
+/* enum: Set the PTP sync status. Status is used by firmware to report to event
+ * subscribers.
+ */
+#define          MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
 /* enum: Above this for future use. */
-#define          MC_CMD_PTP_OP_MAX 0x1b
+#define          MC_CMD_PTP_OP_MAX 0x1c
 
 /* MC_CMD_PTP_IN_ENABLE msgrequest */
 #define    MC_CMD_PTP_IN_ENABLE_LEN 16
 #define    MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
-/* Event queue to send PTP time events to */
+/* Original field containing queue ID. Now extended to include flags. */
 #define       MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1
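
Since the QUEUE field now carries flags as well as the queue ID, a
subscriber wanting sync-status reporting sets bit 31; a minimal sketch
('evq' is an illustrative queue number):

/* Sketch: build the extended QUEUE word for TIME_EVENT_SUBSCRIBE. */
u32 queue_word = (evq &
		  ((1u << MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH) - 1)) |
		 (1u << MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN);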
 
 /* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
 #define    MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
 /* 1 to enable PPS test mode, 0 to disable and return result. */
 #define       MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
 
+/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
+#define    MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* NIC - Host System Clock Synchronization status */
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+/* enum: Host System clock and NIC clock are not in sync */
+#define          MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
+/* enum: Host System clock and NIC clock are synchronized */
+#define          MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1
+/* If synchronized, number of seconds until clocks should be considered to be
+ * no longer in sync.
+ */
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+
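A minimal sketch of issuing the new sub-operation with the driver's MCDI
buffer helpers (field names as referenced above; 'in_sync', 'timeout'
and the surrounding declarations are illustrative):

/* Sketch: report host/NIC sync state, valid for 'timeout' seconds. */
MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN);
int rc;

MCDI_SET_DWORD(inbuf, PTP_IN_CMD, MC_CMD_PTP_OP_SET_SYNC_STATUS);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
MCDI_SET_DWORD(inbuf, PTP_IN_SET_SYNC_STATUS_STATUS,
	       in_sync ? MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC
		       : MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC);
MCDI_SET_DWORD(inbuf, PTP_IN_SET_SYNC_STATUS_TIMEOUT, timeout);
rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), NULL, 0, NULL);
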
 /* MC_CMD_PTP_OUT msgresponse */
 #define    MC_CMD_PTP_OUT_LEN 0
 
 #define          MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
 
 /* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
-#define    MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 8
+#define    MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24
 /* Time format required/used by this NIC. Applies to all PTP MCDI
  * operations that pass times between the host and firmware. If this operation
  * is not supported (older firmware) a format of seconds and nanoseconds should
  * end and start times minus the time that the MC waited for host end.
  */
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+/* Various PTP capabilities */
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
 
 /* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
 #define    MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
 /*            Enum values, see field(s): */
 /*               MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
 
+/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */
+#define    MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0
+
 
 /***********************************/
 /* MC_CMD_CSR_READ32
 #define          MC_CMD_FW_FULL_FEATURED 0x0
 /* enum: Prefer to use firmware with fewer features but lower latency */
 #define          MC_CMD_FW_LOW_LATENCY 0x1
+/* enum: Prefer to use firmware for SolarCapture packed stream mode */
+#define          MC_CMD_FW_PACKED_STREAM 0x2
+/* enum: Prefer to use firmware with fewer features and simpler TX event
+ * batching but higher TX packet rate
+ */
+#define          MC_CMD_FW_HIGH_TX_RATE 0x3
+/* enum: Reserved value */
+#define          MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4
 /* enum: Only this option is allowed for non-admin functions */
 #define          MC_CMD_FW_DONT_CARE  0xffffffff
 
 #define          MC_CMD_LOOPBACK_SD_FES_WS  0x22
 /* enum: Near side of AOE Siena side port */
 #define          MC_CMD_LOOPBACK_AOE_INT_NEAR  0x23
+/* enum: Medford Wireside datapath loopback */
+#define          MC_CMD_LOOPBACK_DATA_WS  0x24
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+#define          MC_CMD_LOOPBACK_FORCE_EXT_LINK  0x25
 /* Supported loopbacks. */
 #define       MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
 #define       MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
 #define        MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
 /* This returns the negotiated flow control value. */
 #define       MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
-/* enum: Flow control is off. */
-#define          MC_CMD_FCNTL_OFF 0x0
-/* enum: Respond to flow control. */
-#define          MC_CMD_FCNTL_RESPOND 0x1
-/* enum: Respond to and Issue flow control. */
-#define          MC_CMD_FCNTL_BIDIR 0x2
+/*            Enum values, see field(s): */
+/*               MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
 #define       MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
 #define        MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
 #define        MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
 #define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK
 
 /* MC_CMD_SET_MAC_IN msgrequest */
-#define    MC_CMD_SET_MAC_IN_LEN 24
+#define    MC_CMD_SET_MAC_IN_LEN 28
 /* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
  * EtherII, VLAN, bug16011 padding).
  */
 #define        MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
 #define       MC_CMD_SET_MAC_IN_FCNTL_OFST 20
 /* enum: Flow control is off. */
-/*               MC_CMD_FCNTL_OFF 0x0 */
+#define          MC_CMD_FCNTL_OFF 0x0
 /* enum: Respond to flow control. */
-/*               MC_CMD_FCNTL_RESPOND 0x1 */
+#define          MC_CMD_FCNTL_RESPOND 0x1
 /* enum: Respond to and Issue flow control. */
-/*               MC_CMD_FCNTL_BIDIR 0x2 */
+#define          MC_CMD_FCNTL_BIDIR 0x2
 /* enum: Auto neg flow control. */
 #define          MC_CMD_FCNTL_AUTO 0x3
+/* enum: Priority flow control (eftest builds only). */
+#define          MC_CMD_FCNTL_QBB 0x4
+/* enum: Issue flow control. */
+#define          MC_CMD_FCNTL_GENERATE 0x5
+#define       MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define        MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
+#define        MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
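
The new FLAGS word at offset 24 carries a single defined bit so far; a
minimal sketch of setting it, assuming the driver's MCDI_POPULATE_DWORD_1
helper and a request buffer sized for the grown message:

/* Sketch: ask for FCS-inclusive receive via the extended SET_MAC. */
MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MAC_IN_LEN);

MCDI_POPULATE_DWORD_1(inbuf, SET_MAC_IN_FLAGS,
		      SET_MAC_IN_FLAG_INCLUDE_FCS, 1);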
 
 /* MC_CMD_SET_MAC_OUT msgresponse */
 #define    MC_CMD_SET_MAC_OUT_LEN 0
  * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
  * performed, and the statistics may be read from the message response. If
 * DMA_ADDR != 0, then the statistics are DMAed to that (page-aligned) location.
- * Locks required: None. Returns: 0, ETIME
+ * Locks required: None. The PERIODIC_CLEAR option is not used and now has no
+ * effect. Returns: 0, ETIME
  */
 #define MC_CMD_MAC_STATS 0x2e
 
 #define       MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
 #define       MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
 #define          MC_CMD_MAC_GENERATION_START  0x0 /* enum */
+#define          MC_CMD_MAC_DMABUF_START  0x1 /* enum */
 #define          MC_CMD_MAC_TX_PKTS  0x1 /* enum */
 #define          MC_CMD_MAC_TX_PAUSE_PKTS  0x2 /* enum */
 #define          MC_CMD_MAC_TX_CONTROL_PKTS  0x3 /* enum */
  * PM_AND_RXDP_COUNTERS capability only.
  */
 #define          MC_CMD_MAC_RXDP_STREAMING_PKTS  0x46
-/* enum: RXDP counter: Number of times an emergency descriptor fetch was
- * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+/* enum: RXDP counter: Number of times an hlb descriptor fetch was performed.
+ * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
  */
 #define          MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS  0x47
 /* enum: RXDP counter: Number of times the DPCPU waited for an existing
 #define          MC_CMD_NVRAM_TYPE_LICENSE 0x12
 /* enum: FC Log. */
 #define          MC_CMD_NVRAM_TYPE_FC_LOG 0x13
+/* enum: Additional flash on FPGA. */
+#define          MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14
 
 
 /***********************************/
  */
 #define MC_CMD_SCHEDINFO 0x3e
 
+#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SCHEDINFO_IN msgrequest */
 #define    MC_CMD_SCHEDINFO_IN_LEN 0
 
 #define          MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC  0x2c
 /* enum: Hotpoint temperature: degC */
 #define          MC_CMD_SENSOR_HOTPOINT_TEMP  0x2d
+/* enum: Port 0 PHY power switch over-current: bool */
+#define          MC_CMD_SENSOR_PHY_POWER_PORT0  0x2e
+/* enum: Port 1 PHY power switch over-current: bool */
+#define          MC_CMD_SENSOR_PHY_POWER_PORT1  0x2f
+/* enum: Mop-up microcontroller reference voltage (millivolts) */
+#define          MC_CMD_SENSOR_MUM_VCC  0x30
+/* enum: 0.9v power phase A voltage: mV */
+#define          MC_CMD_SENSOR_IN_0V9_A  0x31
+/* enum: 0.9v power phase A current: mA */
+#define          MC_CMD_SENSOR_IN_I0V9_A  0x32
+/* enum: 0.9V voltage regulator phase A temperature: degC */
+#define          MC_CMD_SENSOR_VREG_0V9_A_TEMP  0x33
+/* enum: 0.9v power phase B voltage: mV */
+#define          MC_CMD_SENSOR_IN_0V9_B  0x34
+/* enum: 0.9v power phase B current: mA */
+#define          MC_CMD_SENSOR_IN_I0V9_B  0x35
+/* enum: 0.9V voltage regulator phase B temperature: degC */
+#define          MC_CMD_SENSOR_VREG_0V9_B_TEMP  0x36
+/* enum: CCOM AVREG 1v2 supply (internal ADC): mV */
+#define          MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY  0x37
+/* enum: CCOM AVREG 1v2 supply (external ADC): mV */
+#define          MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC  0x38
+/* enum: CCOM AVREG 1v8 supply (internal ADC): mV */
+#define          MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY  0x39
+/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
+#define          MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC  0x3a
+/* enum: Not a sensor: reserved for the next page flag */
+#define          MC_CMD_SENSOR_PAGE1_NEXT  0x3f
+/* enum: controller internal temperature sensor voltage on master core
+ * (internal ADC): mV
+ */
+#define          MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT  0x40
+/* enum: controller internal temperature on master core (internal ADC): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP  0x41
+/* enum: controller internal temperature sensor voltage on master core
+ * (external ADC): mV
+ */
+#define          MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC  0x42
+/* enum: controller internal temperature on master core (external ADC): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC  0x43
+/* enum: controller internal temperature on slave core sensor voltage (internal
+ * ADC): mV
+ */
+#define          MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT  0x44
+/* enum: controller internal temperature on slave core (internal ADC): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP  0x45
+/* enum: controller internal temperature on slave core sensor voltage (external
+ * ADC): mV
+ */
+#define          MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC  0x46
+/* enum: controller internal temperature on slave core (external ADC): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC  0x47
+/* enum: Voltage supplied to the SODIMMs from their power supply: mV */
+#define          MC_CMD_SENSOR_SODIMM_VOUT  0x49
+/* enum: Temperature of SODIMM 0 (if installed): degC */
+#define          MC_CMD_SENSOR_SODIMM_0_TEMP  0x4a
+/* enum: Temperature of SODIMM 1 (if installed): degC */
+#define          MC_CMD_SENSOR_SODIMM_1_TEMP  0x4b
+/* enum: Voltage supplied to QSFP #0 from its power supply: mV */
+#define          MC_CMD_SENSOR_PHY0_VCC  0x4c
+/* enum: Voltage supplied to QSFP #1 from its power supply: mV */
+#define          MC_CMD_SENSOR_PHY1_VCC  0x4d
 /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
 #define       MC_CMD_SENSOR_ENTRY_OFST 4
 #define       MC_CMD_SENSOR_ENTRY_LEN 8
 #define          MC_CMD_SENSOR_STATE_BROKEN  0x3
 /* enum: Sensor is working but does not currently have a reading. */
 #define          MC_CMD_SENSOR_STATE_NO_READING  0x4
+/* enum: Sensor initialisation failed. */
+#define          MC_CMD_SENSOR_STATE_INIT_FAILED  0x5
 #define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
 #define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
 #define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
 
 /* MC_CMD_WORKAROUND_IN msgrequest */
 #define    MC_CMD_WORKAROUND_IN_LEN 8
+/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
 #define       MC_CMD_WORKAROUND_IN_TYPE_OFST 0
 /* enum: Bug 17230 work around. */
 #define          MC_CMD_WORKAROUND_BUG17230 0x1
 #define          MC_CMD_WORKAROUND_BUG35388 0x2
 /* enum: Bug35017 workaround (A64 tables must be identity map) */
 #define          MC_CMD_WORKAROUND_BUG35017 0x3
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define          MC_CMD_WORKAROUND_BUG41750 0x4
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define          MC_CMD_WORKAROUND_BUG42008 0x5
+/* enum: Bug 26807 features present in firmware (multicast filter chaining).
+ * This feature cannot be turned on/off while there are any filters already
+ * present. The behaviour in such a case depends on the acting client's privilege
+ * level. If the client has the admin privilege, then all functions that have
+ * filters installed will be FLRed and the FLR_DONE flag will be set. Otherwise
+ * the command will fail with MC_CMD_ERR_FILTERS_PRESENT.
+ */
+#define          MC_CMD_WORKAROUND_BUG26807 0x6
+/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable
+ * the workaround
+ */
 #define       MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
 
 /* MC_CMD_WORKAROUND_OUT msgresponse */
 #define    MC_CMD_WORKAROUND_OUT_LEN 0
 
+/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used
+ * when (TYPE == MC_CMD_WORKAROUND_BUG26807)
+ */
+#define    MC_CMD_WORKAROUND_EXT_OUT_LEN 4
+#define       MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define        MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
+#define        MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
+
 
 /***********************************/
 /* MC_CMD_GET_PHY_MEDIA_INFO
 
 /***********************************/
 /* MC_CMD_GET_MAC_ADDRESSES
- * Returns the base MAC, count and stride for the requestiong function
+ * Returns the base MAC, count and stride for the requesting function
  */
 #define MC_CMD_GET_MAC_ADDRESSES 0x55
 
 /* Spacing of allocated MAC addresses */
 #define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
 
+
+/***********************************/
+/* MC_CMD_CLP
+ * Perform a CLP related operation
+ */
+#define MC_CMD_CLP 0x56
+
+#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CLP_IN msgrequest */
+#define    MC_CMD_CLP_IN_LEN 4
+/* Sub operation */
+#define       MC_CMD_CLP_IN_OP_OFST 0
+/* enum: Return to factory default settings */
+#define          MC_CMD_CLP_OP_DEFAULT 0x1
+/* enum: Set MAC address */
+#define          MC_CMD_CLP_OP_SET_MAC 0x2
+/* enum: Get MAC address */
+#define          MC_CMD_CLP_OP_GET_MAC 0x3
+/* enum: Set UEFI/GPXE boot mode */
+#define          MC_CMD_CLP_OP_SET_BOOT 0x4
+/* enum: Get UEFI/GPXE boot mode */
+#define          MC_CMD_CLP_OP_GET_BOOT 0x5
+
+/* MC_CMD_CLP_OUT msgresponse */
+#define    MC_CMD_CLP_OUT_LEN 0
+
+/* MC_CMD_CLP_IN_DEFAULT msgrequest */
+#define    MC_CMD_CLP_IN_DEFAULT_LEN 4
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
+#define    MC_CMD_CLP_OUT_DEFAULT_LEN 0
+
+/* MC_CMD_CLP_IN_SET_MAC msgrequest */
+#define    MC_CMD_CLP_IN_SET_MAC_LEN 12
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+/* MAC address assigned to port */
+#define       MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
+#define       MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
+/* Padding */
+#define       MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
+#define       MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
+#define    MC_CMD_CLP_OUT_SET_MAC_LEN 0
+
+/* MC_CMD_CLP_IN_GET_MAC msgrequest */
+#define    MC_CMD_CLP_IN_GET_MAC_LEN 4
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
+#define    MC_CMD_CLP_OUT_GET_MAC_LEN 8
+/* MAC address assigned to port */
+#define       MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0
+#define       MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6
+/* Padding */
+#define       MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6
+#define       MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
+#define    MC_CMD_CLP_IN_SET_BOOT_LEN 5
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+/* Boot flag */
+#define       MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
+#define       MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
+
+/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */
+#define    MC_CMD_CLP_OUT_SET_BOOT_LEN 0
+
+/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
+#define    MC_CMD_CLP_IN_GET_BOOT_LEN 4
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
+#define    MC_CMD_CLP_OUT_GET_BOOT_LEN 4
+/* Boot flag */
+#define       MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0
+#define       MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1
+/* Padding */
+#define       MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1
+#define       MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3
+
+
+/***********************************/
+/* MC_CMD_MUM
+ * Perform a MUM operation
+ */
+#define MC_CMD_MUM 0x57
+
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MUM_IN msgrequest */
+#define    MC_CMD_MUM_IN_LEN 4
+#define       MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define        MC_CMD_MUM_IN_OP_LBN 0
+#define        MC_CMD_MUM_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to MUM */
+#define          MC_CMD_MUM_OP_NULL 0x1
+/* enum: Get MUM version */
+#define          MC_CMD_MUM_OP_GET_VERSION 0x2
+/* enum: Issue raw I2C command to MUM */
+#define          MC_CMD_MUM_OP_RAW_CMD 0x3
+/* enum: Read from registers on devices connected to MUM. */
+#define          MC_CMD_MUM_OP_READ 0x4
+/* enum: Write to registers on devices connected to MUM. */
+#define          MC_CMD_MUM_OP_WRITE 0x5
+/* enum: Control UART logging. */
+#define          MC_CMD_MUM_OP_LOG 0x6
+/* enum: Operations on MUM GPIO lines */
+#define          MC_CMD_MUM_OP_GPIO 0x7
+/* enum: Get sensor readings from MUM */
+#define          MC_CMD_MUM_OP_READ_SENSORS 0x8
+/* enum: Initiate clock programming on the MUM */
+#define          MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9
+/* enum: Initiate FPGA load from flash on the MUM */
+#define          MC_CMD_MUM_OP_FPGA_LOAD 0xa
+/* enum: Request sensor reading from MUM ADC resulting from earlier request via
+ * MUM ATB
+ */
+#define          MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb
+/* enum: Send commands relating to the QSFP ports via the MUM for PHY
+ * operations
+ */
+#define          MC_CMD_MUM_OP_QSFP 0xc
+
+/* MC_CMD_MUM_IN_NULL msgrequest */
+#define    MC_CMD_MUM_IN_NULL_LEN 4
+/* MUM cmd header */
+#define       MC_CMD_MUM_IN_CMD_OFST 0
+
+/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
+#define    MC_CMD_MUM_IN_GET_VERSION_LEN 4
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_IN_READ msgrequest */
+#define    MC_CMD_MUM_IN_READ_LEN 16
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/* ID of the device (connected to the MUM) whose registers are to be read */
+#define       MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+#define          MC_CMD_MUM_DEV_HITTITE 0x1
+/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
+#define          MC_CMD_MUM_DEV_HITTITE_NIC 0x2
+/* 32-bit address to read from */
+#define       MC_CMD_MUM_IN_READ_ADDR_OFST 8
+/* Number of words to read. */
+#define       MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+
+/* MC_CMD_MUM_IN_WRITE msgrequest */
+#define    MC_CMD_MUM_IN_WRITE_LENMIN 16
+#define    MC_CMD_MUM_IN_WRITE_LENMAX 252
+#define    MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/* ID of the device (connected to the MUM) whose registers are to be written */
+#define       MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+/*               MC_CMD_MUM_DEV_HITTITE 0x1 */
+/* 32-bit address to write to */
+#define       MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+/* Words to write */
+#define       MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
+#define       MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
+#define       MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1
+#define       MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60
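+
+/* Illustrative sketch, not part of the generated header: variable-length
+ * requests use the _LEN(num) macro together with the _MINNUM/_MAXNUM bounds.
+ * For MC_CMD_MUM_IN_WRITE, 1..60 words give 16..252 bytes, matching
+ * _LENMIN/_LENMAX. Hypothetical helper; returns -1 when out of range:
+ */
+static inline int mum_write_msg_len(unsigned int nwords)
+{
+        if (nwords < MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM ||
+            nwords > MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM)
+                return -1;
+        return MC_CMD_MUM_IN_WRITE_LEN(nwords);
+}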
+
+/* MC_CMD_MUM_IN_RAW_CMD msgrequest */
+#define    MC_CMD_MUM_IN_RAW_CMD_LENMIN 17
+#define    MC_CMD_MUM_IN_RAW_CMD_LENMAX 252
+#define    MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MUM I2C cmd code */
+#define       MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+/* Number of bytes to write */
+#define       MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+/* Number of bytes to read */
+#define       MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+/* Bytes to write */
+#define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
+#define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
+#define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1
+#define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236
+
+/* MC_CMD_MUM_IN_LOG msgrequest */
+#define    MC_CMD_MUM_IN_LOG_LEN 8
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_LOG_OP_OFST 4
+#define          MC_CMD_MUM_IN_LOG_OP_UART  0x1 /* enum */
+
+/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
+#define    MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/* Enable/disable debug output to UART */
+#define       MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+
+/* MC_CMD_MUM_IN_GPIO msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_LEN 8
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
+#define        MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
+#define          MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */
+
+/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+/* The first 32-bit word to be written to the GPIO OUT register. */
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+/* The second 32-bit word to be written to the GPIO OUT register. */
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+
+/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
+#define        MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
+#define          MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */
+#define        MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16
+#define        MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */
+#define    MC_CMD_MUM_IN_READ_SENSORS_LEN 8
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define        MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
+#define        MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
+#define        MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
+#define        MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8
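+
+/* Illustrative sketch, not part of the generated header: the PARAMS dword
+ * packs the first sensor ID in bits 0..7 and the sensor count in bits 8..15,
+ * per the _LBN/_WIDTH pairs above. Hypothetical helper, u32 assumed:
+ */
+static inline u32 mum_read_sensors_params(u32 first_sensor, u32 num_sensors)
+{
+        return (first_sensor << MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN) |
+               (num_sensors << MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN);
+}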
+
+/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */
+#define    MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/* Bit-mask of clocks to be programmed */
+#define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define          MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
+#define          MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
+#define          MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
+/* Control flags for clock programming */
+#define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
+#define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
+
+/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
+#define    MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/* Enable/Disable FPGA config from flash */
+#define       MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+
+/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
+#define    MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_IN_QSFP msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_LEN 12
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define        MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
+#define        MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
+#define          MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
+#define       MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_INIT_LEN 16
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+
+/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+
+/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+
+/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+
+/* MC_CMD_MUM_OUT msgresponse */
+#define    MC_CMD_MUM_OUT_LEN 0
+
+/* MC_CMD_MUM_OUT_NULL msgresponse */
+#define    MC_CMD_MUM_OUT_NULL_LEN 0
+
+/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
+#define    MC_CMD_MUM_OUT_GET_VERSION_LEN 12
+#define       MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
+#define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
+#define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
+
+/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
+#define    MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
+#define    MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252
+#define    MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num))
+/* returned data */
+#define       MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0
+#define       MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1
+#define       MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1
+#define       MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252
+
+/* MC_CMD_MUM_OUT_READ msgresponse */
+#define    MC_CMD_MUM_OUT_READ_LENMIN 4
+#define    MC_CMD_MUM_OUT_READ_LENMAX 252
+#define    MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num))
+#define       MC_CMD_MUM_OUT_READ_BUFFER_OFST 0
+#define       MC_CMD_MUM_OUT_READ_BUFFER_LEN 4
+#define       MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1
+#define       MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63
+
+/* MC_CMD_MUM_OUT_WRITE msgresponse */
+#define    MC_CMD_MUM_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG msgresponse */
+#define    MC_CMD_MUM_OUT_LOG_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */
+#define    MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
+/* The first 32-bit word read from the GPIO IN register. */
+#define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+/* The second 32-bit word read from the GPIO IN register. */
+#define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
+/* The first 32-bit word read from the GPIO OUT register. */
+#define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+/* The second 32-bit word read from the GPIO OUT register. */
+#define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
+#define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
+#define       MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */
+#define    MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4
+#define    MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252
+#define    MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num))
+#define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0
+#define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4
+#define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1
+#define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63
+#define        MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
+#define        MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16
+#define        MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
+#define        MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8
+#define        MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24
+#define        MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8
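+
+/* Illustrative sketch, not part of the generated header: each 32-bit DATA
+ * word returned by READ_SENSORS packs a 16-bit reading, an 8-bit state and
+ * an 8-bit type, per the _LBN/_WIDTH pairs above. Hypothetical decoder:
+ */
+static inline void mum_sensor_decode(u32 data, u32 *reading, u32 *state,
+                                     u32 *type)
+{
+        *reading = (data >> MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN) & 0xffff;
+        *state = (data >> MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN) & 0xff;
+        *type = (data >> MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN) & 0xff;
+}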
+
+/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
+#define    MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
+#define       MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+
+/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
+#define    MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
+#define    MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
+#define       MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+
+/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
+
+/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
+#define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1
+
+/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
+#define       MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+
+/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
+#define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
+#define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
+/* in bytes */
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
+
+/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
+#define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+
+/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
+#define       MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+
 /* MC_CMD_RESOURCE_SPECIFIER enum */
 /* enum: Any */
 #define          MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
 #define          NVRAM_PARTITION_TYPE_PHY_MIN              0xa00
 /* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
 #define          NVRAM_PARTITION_TYPE_PHY_MAX              0xaff
+/* enum: Primary FPGA partition */
+#define          NVRAM_PARTITION_TYPE_FPGA                 0xb00
+/* enum: Secondary FPGA partition */
+#define          NVRAM_PARTITION_TYPE_FPGA_BACKUP          0xb01
+/* enum: FC firmware partition */
+#define          NVRAM_PARTITION_TYPE_FC_FIRMWARE          0xb02
+/* enum: FC License partition */
+#define          NVRAM_PARTITION_TYPE_FC_LICENSE           0xb03
+/* enum: Non-volatile log output partition for FC */
+#define          NVRAM_PARTITION_TYPE_FC_LOG               0xb04
+/* enum: MUM firmware partition */
+#define          NVRAM_PARTITION_TYPE_MUM_FIRMWARE         0xc00
+/* enum: MUM Non-volatile log output partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_LOG              0xc01
+/* enum: MUM Application table partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_APPTABLE         0xc02
+/* enum: MUM boot rom partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_BOOT_ROM         0xc03
+/* enum: MUM production signatures & calibration rom partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_PROD_ROM         0xc04
+/* enum: MUM user signatures & calibration rom partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_USER_ROM         0xc05
+/* enum: MUM fuses and lockbits partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_FUSELOCK         0xc06
 /* enum: Start of reserved value range (firmware may use for any purpose) */
 #define          NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN  0xff00
 /* enum: End of reserved value range (firmware may use for any purpose) */
 #define    LICENSED_APP_ID_LEN 4
 #define       LICENSED_APP_ID_ID_OFST 0
 /* enum: OpenOnload */
-#define          LICENSED_APP_ID_ONLOAD            0x1
+#define          LICENSED_APP_ID_ONLOAD                  0x1
 /* enum: PTP timestamping */
-#define          LICENSED_APP_ID_PTP               0x2
+#define          LICENSED_APP_ID_PTP                     0x2
 /* enum: SolarCapture Pro */
-#define          LICENSED_APP_ID_SOLARCAPTURE_PRO  0x4
+#define          LICENSED_APP_ID_SOLARCAPTURE_PRO        0x4
+/* enum: SolarSecure filter engine */
+#define          LICENSED_APP_ID_SOLARSECURE             0x8
+/* enum: Performance monitor */
+#define          LICENSED_APP_ID_PERF_MONITOR            0x10
+/* enum: SolarCapture Live */
+#define          LICENSED_APP_ID_SOLARCAPTURE_LIVE       0x20
+/* enum: Capture SolarSystem */
+#define          LICENSED_APP_ID_CAPTURE_SOLARSYSTEM     0x40
+/* enum: Network Access Control */
+#define          LICENSED_APP_ID_NETWORK_ACCESS_CONTROL  0x80
 #define       LICENSED_APP_ID_ID_LBN 0
 #define       LICENSED_APP_ID_ID_WIDTH 32
 
-
-/***********************************/
-/* MC_CMD_GET_WORKAROUNDS
- * Read the list of all implemented and all currently enabled workarounds. The
- * enums here must correspond with those in MC_CMD_WORKAROUND.
- */
-#define MC_CMD_GET_WORKAROUNDS 0x59
-
-/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
-#define    MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
-/* Each workaround is represented by a single bit according to the enums below.
- */
-#define       MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
-#define       MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
-/* enum: Bug 17230 work around. */
-#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
-/* enum: Bug 35388 work around (unsafe EVQ writes). */
-#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
-/* enum: Bug35017 workaround (A64 tables must be identity map) */
-#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
-
-
-/***********************************/
-/* MC_CMD_LINK_STATE_MODE
- * Read/set link state mode of a VF
- */
-#define MC_CMD_LINK_STATE_MODE 0x5c
-
-#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
-#define    MC_CMD_LINK_STATE_MODE_IN_LEN 8
-/* The target function to have its link state mode read or set, must be a VF
- * e.g. VF 1,3 = 0x00030001
- */
-#define       MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
-#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
-#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
-#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
-#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
-/* New link state mode to be set */
-#define       MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
-#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO       0x0 /* enum */
-#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP         0x1 /* enum */
-#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN       0x2 /* enum */
-/* enum: Use this value to just read the existing setting without modifying it.
- */
-#define          MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE         0xffffffff
-
-/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
-#define    MC_CMD_LINK_STATE_MODE_OUT_LEN 4
-#define       MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+/* TX_TIMESTAMP_EVENT structuredef */
+#define    TX_TIMESTAMP_EVENT_LEN 6
+/* lower 16 bits of timestamp data */
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16
+/* Type of TX event: an ordinary TX completion, or the low or high part of a
+ * TX timestamp
+ */
+#define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3
+#define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
+/* enum: This is a TX completion event, not a timestamp */
+#define          TX_TIMESTAMP_EVENT_TX_EV_COMPLETION  0x0
+/* enum: This is the low part of a TX timestamp event */
+#define          TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO  0x51
+/* enum: This is the high part of a TX timestamp event */
+#define          TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI  0x52
+#define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24
+#define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8
+/* upper 16 bits of timestamp data */
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16
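+
+/* Illustrative sketch, not part of the generated header: a full TX timestamp
+ * arrives as two events, TX_EV_TSTAMP_LO then TX_EV_TSTAMP_HI, each carrying
+ * 16 bits of timestamp data. Combining the halves (event ordering and the
+ * TX_EV_TYPE check are assumed handled by the caller; types per
+ * <linux/types.h>):
+ */
+static inline u32 tx_tstamp_combine(u16 lo_data, u16 hi_data)
+{
+        return ((u32)hi_data << 16) | lo_data;
+}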
+
+/* RSS_MODE structuredef */
+#define    RSS_MODE_LEN 1
+/* The RSS mode for a particular packet type is a value from 0 to 15, which
+ * can be viewed as 4 bits selecting which fields are included in the hash. (A
+ * value of 0 effectively disables RSS spreading for the packet type.) The
+ * YAML generation tools require this structure to be a whole number of bytes
+ * wide, but only 4 bits are relevant.
+ */
+#define       RSS_MODE_HASH_SELECTOR_OFST 0
+#define       RSS_MODE_HASH_SELECTOR_LEN 1
+#define        RSS_MODE_HASH_SRC_ADDR_LBN 0
+#define        RSS_MODE_HASH_SRC_ADDR_WIDTH 1
+#define        RSS_MODE_HASH_DST_ADDR_LBN 1
+#define        RSS_MODE_HASH_DST_ADDR_WIDTH 1
+#define        RSS_MODE_HASH_SRC_PORT_LBN 2
+#define        RSS_MODE_HASH_SRC_PORT_WIDTH 1
+#define        RSS_MODE_HASH_DST_PORT_LBN 3
+#define        RSS_MODE_HASH_DST_PORT_WIDTH 1
+#define       RSS_MODE_HASH_SELECTOR_LBN 0
+#define       RSS_MODE_HASH_SELECTOR_WIDTH 8
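+
+/* Illustrative sketch, not part of the generated header: composing the 4-bit
+ * hash selector from the single-bit flags above. Passing all four flags
+ * selects a classic 4-tuple hash; passing none disables RSS spreading for
+ * the packet type. Hypothetical helper:
+ */
+static inline u8 rss_mode_selector(bool src_addr, bool dst_addr,
+                                   bool src_port, bool dst_port)
+{
+        return (src_addr << RSS_MODE_HASH_SRC_ADDR_LBN) |
+               (dst_addr << RSS_MODE_HASH_DST_ADDR_LBN) |
+               (src_port << RSS_MODE_HASH_SRC_PORT_LBN) |
+               (dst_port << RSS_MODE_HASH_DST_PORT_LBN);
+}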
 
 
 /***********************************/
 
 #define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
-/* MC_CMD_INIT_RXQ_IN msgrequest */
+/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy INIT_RXQ request. Use extended version
+ * in new code.
+ */
 #define    MC_CMD_INIT_RXQ_IN_LENMIN 36
 #define    MC_CMD_INIT_RXQ_IN_LENMAX 252
 #define    MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
 #define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
 #define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
 
+/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode
+ * flags
+ */
+#define    MC_CMD_INIT_RXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define       MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define       MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define       MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a
+ * function-local queue index.
+ */
+#define       MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define       MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3
+#define        MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10
+#define        MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define          MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET  0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM  0x1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define        MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M  0x0 /* enum */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K  0x1 /* enum */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K  0x2 /* enum */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K  0x3 /* enum */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K  0x4 /* enum */
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define       MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define       MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+/* 64-bit address of a 4KB-aligned, 4KB host memory buffer */
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define       MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
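+
+/* Illustrative sketch, not part of the generated header: building the FLAGS
+ * dword for a packed-stream RXQ. DMA_MODE occupies bits 10..13 and the
+ * packed-stream buffer size bits 15..17, per the _LBN/_WIDTH pairs above.
+ * Hypothetical helper; ps_buff_size is one of the _PS_BUFF_* enums:
+ */
+static inline u32 init_rxq_ext_ps_flags(u32 ps_buff_size)
+{
+        return (MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM <<
+                MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN) |
+               (ps_buff_size <<
+                MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN);
+}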
+
 /* MC_CMD_INIT_RXQ_OUT msgresponse */
 #define    MC_CMD_INIT_RXQ_OUT_LEN 0
 
+/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */
+#define    MC_CMD_INIT_RXQ_EXT_OUT_LEN 0
+
 
 /***********************************/
 /* MC_CMD_INIT_TXQ
 
 #define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
-/* MC_CMD_INIT_TXQ_IN msgrequest */
+/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version
+ * in new code.
+ */
 #define    MC_CMD_INIT_TXQ_IN_LENMIN 36
 #define    MC_CMD_INIT_TXQ_IN_LENMAX 252
 #define    MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
 #define        MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
 #define        MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
 #define        MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
 /* The port ID associated with the v-adaptor which should contain this DMAQ. */
 #define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
 #define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
 
+/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode
+ * flags
+ */
+#define    MC_CMD_INIT_TXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define       MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define       MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define       MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a
+ * function-local queue index.
+ */
+#define       MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define       MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4
+#define        MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define       MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define       MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+/* 64-bit address of a 4KB-aligned, 4KB host memory buffer */
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
+/* Flags related to Qbb flow control mode. */
+#define       MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3
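+
+/* Illustrative sketch, not part of the generated header: each DMA_ADDR entry
+ * is a 64-bit little-endian value split into _LO/_HI dwords, spaced
+ * _DMA_ADDR_LEN bytes apart. Hypothetical helpers writing entry idx
+ * (idx < _DMA_ADDR_MAXNUM); u8/u32/u64 from <linux/types.h> assumed:
+ */
+static inline void mcdi_put_le32(u8 *p, u32 v)
+{
+        p[0] = v & 0xff;
+        p[1] = (v >> 8) & 0xff;
+        p[2] = (v >> 16) & 0xff;
+        p[3] = (v >> 24) & 0xff;
+}
+
+static inline void init_txq_ext_set_dma_addr(u8 *buf, unsigned int idx,
+                                             u64 addr)
+{
+        unsigned int ofst = MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST +
+                            idx * MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN;
+
+        mcdi_put_le32(buf + ofst, (u32)addr);             /* _LO dword */
+        mcdi_put_le32(buf + ofst + 4, (u32)(addr >> 32)); /* _HI dword */
+}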
+
 /* MC_CMD_INIT_TXQ_OUT msgresponse */
 #define    MC_CMD_INIT_TXQ_OUT_LEN 0
 
 /* MC_CMD_PROXY_CMD_OUT msgresponse */
 #define    MC_CMD_PROXY_CMD_OUT_LEN 0
 
+/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to
+ * manage proxied requests
+ */
+#define    MC_PROXY_STATUS_BUFFER_LEN 16
+/* Handle allocated by the firmware for this proxy transaction */
+#define       MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+/* enum: An invalid handle. */
+#define          MC_PROXY_STATUS_BUFFER_HANDLE_INVALID  0x0
+#define       MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
+#define       MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32
+/* The requesting physical function number */
+#define       MC_PROXY_STATUS_BUFFER_PF_OFST 4
+#define       MC_PROXY_STATUS_BUFFER_PF_LEN 2
+#define       MC_PROXY_STATUS_BUFFER_PF_LBN 32
+#define       MC_PROXY_STATUS_BUFFER_PF_WIDTH 16
+/* The requesting virtual function number. Set to VF_NULL if the target is a
+ * PF.
+ */
+#define       MC_PROXY_STATUS_BUFFER_VF_OFST 6
+#define       MC_PROXY_STATUS_BUFFER_VF_LEN 2
+#define       MC_PROXY_STATUS_BUFFER_VF_LBN 48
+#define       MC_PROXY_STATUS_BUFFER_VF_WIDTH 16
+/* The target function RID. */
+#define       MC_PROXY_STATUS_BUFFER_RID_OFST 8
+#define       MC_PROXY_STATUS_BUFFER_RID_LEN 2
+#define       MC_PROXY_STATUS_BUFFER_RID_LBN 64
+#define       MC_PROXY_STATUS_BUFFER_RID_WIDTH 16
+/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. */
+#define       MC_PROXY_STATUS_BUFFER_STATUS_OFST 10
+#define       MC_PROXY_STATUS_BUFFER_STATUS_LEN 2
+#define       MC_PROXY_STATUS_BUFFER_STATUS_LBN 80
+#define       MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16
+/* If a request is authorized rather than carried out by the host, this is the
+ * elevated privilege mask granted to the requesting function.
+ */
+#define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
+#define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
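+
+/* Illustrative sketch, not part of the generated header: unpacking one
+ * 16-byte MC_PROXY_STATUS_BUFFER entry from host memory. Fields are
+ * little-endian at the byte offsets above. The holder struct and helper
+ * names are hypothetical:
+ */
+static inline u16 mcdi_get_le16(const u8 *p)
+{
+        return p[0] | (p[1] << 8);
+}
+
+static inline u32 mcdi_get_le32(const u8 *p)
+{
+        return p[0] | (p[1] << 8) | (p[2] << 16) | ((u32)p[3] << 24);
+}
+
+struct proxy_status_entry {
+        u32 handle;
+        u16 pf, vf, rid, status;
+        u32 granted_privileges;
+};
+
+static inline void proxy_status_parse(const u8 *buf,
+                                      struct proxy_status_entry *e)
+{
+        e->handle = mcdi_get_le32(buf + MC_PROXY_STATUS_BUFFER_HANDLE_OFST);
+        e->pf = mcdi_get_le16(buf + MC_PROXY_STATUS_BUFFER_PF_OFST);
+        e->vf = mcdi_get_le16(buf + MC_PROXY_STATUS_BUFFER_VF_OFST);
+        e->rid = mcdi_get_le16(buf + MC_PROXY_STATUS_BUFFER_RID_OFST);
+        e->status = mcdi_get_le16(buf + MC_PROXY_STATUS_BUFFER_STATUS_OFST);
+        e->granted_privileges =
+                mcdi_get_le32(buf + MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST);
+}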
+
+
+/***********************************/
+/* MC_CMD_PROXY_CONFIGURE
+ * Enable/disable authorization of MCDI requests from unprivileged functions by
+ * a designated admin function
+ */
+#define MC_CMD_PROXY_CONFIGURE 0x58
+
+#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
+#define    MC_CMD_PROXY_CONFIGURE_IN_LEN 108
+#define       MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define        MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
+#define        MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size STATUS_BLOCK_SIZE.
+ */
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size REQUEST_BLOCK_SIZE.
+ */
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size REPLY_BLOCK_SIZE. This buffer is only needed if the
+ * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+/* Applies to all three buffers */
+#define       MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+/* A bit mask defining which MCDI operations may be proxied */
+#define       MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
+#define       MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
+
+/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
+#define    MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
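+
+/* Illustrative sketch, not part of the generated header: the three proxy
+ * buffers each hold NUM_BLOCKS blocks, and the block sizes must be powers of
+ * two (the reply block size may instead be zero when no reply buffer is
+ * provided). A hypothetical validity check:
+ */
+static inline bool proxy_block_size_ok(u32 block_size, bool may_be_zero)
+{
+        if (block_size == 0)
+                return may_be_zero;
+        return (block_size & (block_size - 1)) == 0;    /* power of 2 */
+}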
+
+
+/***********************************/
+/* MC_CMD_PROXY_COMPLETE
+ * Tells FW that a requested proxy operation has either been completed (by
+ * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the
+ * function that enabled proxying/authorization (by using
+ * MC_CMD_PROXY_CONFIGURE).
+ */
+#define MC_CMD_PROXY_COMPLETE 0x5f
+
+#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
+#define    MC_CMD_PROXY_COMPLETE_IN_LEN 12
+#define       MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
+#define       MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
+/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
+ * is stored in the REPLY_BUFF.
+ */
+#define          MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0
+/* enum: The operation has been authorized. The originating function may now
+ * try again.
+ */
+#define          MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1
+/* enum: The operation has been declined. */
+#define          MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2
+/* enum: The authorization failed because the relevant application did not
+ * respond in time.
+ */
+#define          MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
+#define       MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
+
+/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
+#define    MC_CMD_PROXY_COMPLETE_OUT_LEN 0
+
 
 /***********************************/
 /* MC_CMD_ALLOC_BUFTBL_CHUNK
 /* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
 #define    MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
 
+/* PORT_CONFIG_ENTRY structuredef */
+#define    PORT_CONFIG_ENTRY_LEN 16
+/* External port number (label) */
+#define       PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0
+#define       PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1
+#define       PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0
+#define       PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8
+/* Port core location */
+#define       PORT_CONFIG_ENTRY_CORE_OFST 1
+#define       PORT_CONFIG_ENTRY_CORE_LEN 1
+#define          PORT_CONFIG_ENTRY_STANDALONE  0x0 /* enum */
+#define          PORT_CONFIG_ENTRY_MASTER  0x1 /* enum */
+#define          PORT_CONFIG_ENTRY_SLAVE  0x2 /* enum */
+#define       PORT_CONFIG_ENTRY_CORE_LBN 8
+#define       PORT_CONFIG_ENTRY_CORE_WIDTH 8
+/* Internal number (HW resource) relative to the core */
+#define       PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2
+#define       PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1
+#define       PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16
+#define       PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8
+/* Reserved */
+#define       PORT_CONFIG_ENTRY_RSVD_OFST 3
+#define       PORT_CONFIG_ENTRY_RSVD_LEN 1
+#define       PORT_CONFIG_ENTRY_RSVD_LBN 24
+#define       PORT_CONFIG_ENTRY_RSVD_WIDTH 8
+/* Bitmask of KR lanes used by the port */
+#define       PORT_CONFIG_ENTRY_LANES_OFST 4
+#define       PORT_CONFIG_ENTRY_LANES_LBN 32
+#define       PORT_CONFIG_ENTRY_LANES_WIDTH 32
+/* Port capabilities (MC_CMD_PHY_CAP_*) */
+#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8
+#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64
+#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32
+/* Reserved (align to 16 bytes) */
+#define       PORT_CONFIG_ENTRY_RSVD2_OFST 12
+#define       PORT_CONFIG_ENTRY_RSVD2_LBN 96
+#define       PORT_CONFIG_ENTRY_RSVD2_WIDTH 32
+
 
 /***********************************/
 /* MC_CMD_FILTER_OP
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_HOST  0x1
 /* enum: receive to MC */
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_MC  0x2
-/* enum: loop back to port 0 TX MAC */
+/* enum: loop back to TXDP 0 */
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_TX0  0x3
-/* enum: loop back to port 1 TX MAC */
+/* enum: loop back to TXDP 1 */
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_TX1  0x4
 /* receive queue handle (for multiple queue modes, this is the base queue) */
 #define       MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
 #define          MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH  0x80000000
 /* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
  * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
- * MC_CMD_DOT1P_MAPPING_ALLOC. Note that these handles should be considered
- * opaque to the host, although a value of 0xFFFFFFFF is guaranteed never to be
- * a valid handle.
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
  */
 #define       MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
 /* transmit domain (reserved; set to 0) */
 #define       MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
 #define       MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
 
+/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to
+ * include handling of VXLAN/NVGRE encapsulated frame filtering (which is
+ * supported on Medford only).
+ */
+#define    MC_CMD_FILTER_OP_EXT_IN_LEN 172
+/* identifies the type of operation requested */
+#define       MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+/*            Enum values, see field(s): */
+/*               MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define       MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4
+#define       MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8
+#define       MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4
+#define       MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+/* fields to include in match criteria */
+#define       MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+/* enum: drop packets */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP  0x0
+/* enum: receive to host */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST  0x1
+/* enum: receive to MC */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC  0x2
+/* enum: loop back to TXDP 0 */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0  0x3
+/* enum: loop back to TXDP 1 */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1  0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+/* receive mode */
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+/* enum: receive to just the specified queue */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE  0x0
+/* enum: receive to multiple queues using RSS context */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS  0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING  0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH  0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+/* transmit domain (reserved; set to 0) */
+#define       MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+/* enum: request default behaviour (based on filter type) */
+#define          MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT  0xffffffff
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60
+#define       MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62
+#define       MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64
+#define       MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define       MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66
+#define       MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define       MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define          MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN  0x0
+/* enum: Match Geneve traffic with this VNI */
+#define          MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE  0x1
+/* enum: Reserved for experimental development use */
+#define          MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL  0xfe
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define          MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE  0x0
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16
+
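+/* Illustrative sketch, not part of the generated header: encoding the
+ * VNI_OR_VSID dword of MC_CMD_FILTER_OP_EXT_IN above. The 24-bit VNI/VSID
+ * value sits in bits 0..23 and the type discriminator in bits 24..31, e.g.
+ * filter_vni_or_vsid(vni, MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE) for
+ * Geneve traffic. Hypothetical helper:
+ */
+static inline u32 filter_vni_or_vsid(u32 value, u32 type)
+{
+        return (value &
+                ((1u << MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH) - 1)) |
+               (type << MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN);
+}
+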
 /* MC_CMD_FILTER_OP_OUT msgresponse */
 #define    MC_CMD_FILTER_OP_OUT_LEN 12
 /* identifies the type of operation requested */
 #define       MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
 #define       MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
 #define       MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
+/* enum: guaranteed invalid filter handle (low 32 bits) */
+#define          MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID  0xffffffff
+/* enum: guaranteed invalid filter handle (high 32 bits) */
+#define          MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID  0xffffffff
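+
+/* Illustrative sketch, not part of the generated header: a handle of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be valid, so a caller can test
+ * the two dwords against the _INVALID enums above. Hypothetical helper:
+ */
+static inline bool filter_handle_is_invalid(u32 lo, u32 hi)
+{
+        return lo == MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID &&
+               hi == MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID;
+}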
+
+/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */
+#define    MC_CMD_FILTER_OP_EXT_OUT_LEN 12
+/* identifies the type of operation requested */
+#define       MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+/*            Enum values, see field(s): */
+/*               MC_CMD_FILTER_OP_EXT_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define       MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4
+#define       MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8
+#define       MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4
+#define       MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8
+/*            Enum values, see field(s): */
+/*               MC_CMD_FILTER_OP_OUT/HANDLE */
 
 
 /***********************************/
 #define       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
 /* enum: read the list of supported RX filter matches */
 #define          MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES  0x1
+/* enum: read flags indicating restrictions on filter insertion for the calling
+ * client
+ */
+#define          MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS  0x2
 
 /* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
 #define    MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
 #define       MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
 #define       MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
 
+/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */
+#define    MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
+/* identifies the type of operation requested */
+#define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+/*            Enum values, see field(s): */
+/*               MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* bitfield of filter insertion restrictions */
+#define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define        MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
+#define        MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+
 
 /***********************************/
 /* MC_CMD_PARSER_DISP_RW
 #define          MC_CMD_PARSER_DISP_RW_IN_RX_DICPU  0x0
 /* enum: TX dispatcher CPU */
 #define          MC_CMD_PARSER_DISP_RW_IN_TX_DICPU  0x1
-/* enum: Lookup engine */
+/* enum: Lookup engine (with original metadata format) */
 #define          MC_CMD_PARSER_DISP_RW_IN_LUE  0x2
+/* enum: Lookup engine (with requested metadata format) */
+#define          MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA  0x3
 /* identifies the type of operation requested */
 #define       MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
 /* enum: read a word of DICPU DMEM or a LUE entry */
 #define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
 /* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
 #define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
+#define       MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
 /* value to write (for LUE writes) */
 #define       MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
 #define       MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
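
The DMEM read-modify-write rule quoted above, new = (old & mask) ^ value, lets a single command clear, set, or toggle arbitrary bits. A minimal worked example (values are illustrative only):

#include <stdint.h>

static uint32_t dmem_rmw(uint32_t old, uint32_t and_mask, uint32_t xor_value)
{
	return (old & and_mask) ^ xor_value;
}

/* clear bit 3, set bit 0: dmem_rmw(0x0000000c, ~0x8u, 0x1u) == 0x00000005 */
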
 /* The maximum number of VIs that would be useful */
 #define       MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
 
-/* MC_CMD_ALLOC_VIS_OUT msgresponse */
+/* MC_CMD_ALLOC_VIS_OUT msgresponse: response to the Huntington-compatible
+ * VI_ALLOC request. Use extended version in new code.
+ */
 #define    MC_CMD_ALLOC_VIS_OUT_LEN 8
 /* The number of VIs allocated on this function */
 #define       MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
  */
 #define       MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
 
+/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
+#define    MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+
 
 /***********************************/
 /* MC_CMD_FREE_VIS
 #define    MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
 
 /* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
-#define    MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 8
+#define    MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
 /* The number of VIs allocated on this function */
 #define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
 /* The base absolute VI number allocated to this function. Required to
  * correctly interpret wakeup events.
  */
 #define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
 
 
 /***********************************/
 #define MC_CMD_GET_CAPABILITIES 0xbe
 
 #define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_CAPABILITIES_IN msgrequest */
 #define    MC_CMD_GET_CAPABILITIES_IN_LEN 0
 
 #define    MC_CMD_GET_CAPABILITIES_OUT_LEN 20
 /* First word of flags. */
 #define       MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
 #define        MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
 #define        MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
 #define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
 #define        MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1
 /* RxDPCPU firmware id. */
 #define       MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
 #define       MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP  0x0
 /* enum: Low latency RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY  0x1
+/* enum: Packed stream RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM  0x2
+/* enum: BIST RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST  0x10a
 /* enum: RXDP Test firmware image 1 */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH  0x101
 /* enum: RXDP Test firmware image 2 */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP  0x0
 /* enum: Low latency TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY  0x1
+/* enum: High packet rate TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE  0x3
+/* enum: BIST TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST  0x12d
 /* enum: TXDP Test firmware image 1 */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT  0x101
 /* enum: TXDP Test firmware image 2 */
 #define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
 #define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
-#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT  0x1 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH  0x3 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM  0x4 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY  0x5 /* enum */
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED  0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT  0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2
+/* enum: Virtual switching (full feature) RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH  0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM  0x4
+/* enum: Low latency RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY  0x5
+/* enum: Packed stream RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM  0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF  0x7
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY  0xf
 #define       MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
 #define       MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
 #define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
 #define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
-#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT  0x1 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH  0x3 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM  0x4 /* enum */
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED  0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT  0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2
+/* enum: Virtual switching (full feature) TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH  0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM  0x4
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY  0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF  0x7
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* Hardware capabilities of NIC */
 #define       MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
 /* Licensed capabilities */
 /* the rate in mbps */
 #define       MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
 
+/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
+#define    MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
+/* the bucket id */
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+/* the rate in mbps */
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+/* the desired maximum fill level */
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+
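
For reference, a minimal sketch of packing the 12-byte extended bucket-init request. It assumes MCDI fields are 32-bit little-endian words (consistent with the rest of this protocol) and that buf is at least MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN bytes; put_le32() is a made-up helper:

#include <stdint.h>
#include <string.h>

static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = val & 0xff;
	buf[ofst + 1] = (val >> 8) & 0xff;
	buf[ofst + 2] = (val >> 16) & 0xff;
	buf[ofst + 3] = (val >> 24) & 0xff;
}

static void build_bucket_init_ext(uint8_t *buf, uint32_t bucket,
				  uint32_t rate_mbps, uint32_t max_fill)
{
	memset(buf, 0, MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN);
	put_le32(buf, MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST, bucket);
	put_le32(buf, MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST, rate_mbps);
	put_le32(buf, MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST, max_fill);
}
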
 /* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
 #define    MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
 
 #define       MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
 /* the static priority associated with the txq */
 #define       MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
-/* bitmask of the priority queues this txq is inserted into */
+/* bitmask of the priority queues this txq is added to when it is inserted. */
 #define       MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
 /* the reaction point (RP) bucket */
 #define       MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
 /* an already reserved bucket (typically set to bucket associated with outer
 /* the min bucket (typically for ETS/minimum bandwidth) */
 #define       MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
 
+/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
+#define    MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
+/* the txq id */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+/* the static priority associated with the txq */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+/* bitmask of the priority queues this txq is added to when it is inserted. */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
+/* the reaction point (RP) bucket */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+/* the static priority associated with the txq */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+
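
The three PQ_FLAG bits above select which priority queues a txq joins; they are or-ed into the PQ_FLAGS word by bit position. A minimal sketch marking a txq for both the GUARANTEED and NORMAL queues:

#include <stdint.h>

static uint32_t txq_pq_flags_guaranteed_and_normal(void)
{
	return (1u << MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN) |
	       (1u << MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN);
}
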
 /* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
 #define    MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
 
 #define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN  0x1
 /* enum: VEB */
 #define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB  0x2
-/* enum: VEPA */
+/* enum: VEPA (obsolete) */
 #define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA  0x3
+/* enum: MUX */
+#define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX  0x4
+/* enum: Snapper specific; semantics TBD */
+#define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST  0x5
 /* Flags controlling v-port creation */
 #define       MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
 #define        MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
 #define        MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
-/* The number of VLAN tags to support. */
+/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
+ * this must be one or greater, and the attached v-ports must have exactly this
+ * number of tags. For other v-switch types, this must be zero or greater, and
+ * is an upper limit on the number of VLAN tags for attached v-ports. An error
+ * will be returned if existing configuration means we can't support attached
+ * v-ports with this number of tags.
+ */
 #define       MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
 
 /* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
 #define       MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
-/* The number of VLAN tags to insert/remove. */
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
 #define       MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
 /* The actual VLAN tags to insert/remove */
 #define       MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
 
 /* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
-/* The handle of the new RSS context */
+/* The handle of the new RSS context. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
 #define       MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+/* enum: guaranteed invalid RSS context handle value */
+#define          MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID  0xffffffff
 
 
 /***********************************/
 #define    MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
-/* Hash control flags */
+/* Hash control flags. The _EN bits are always supported. The _MODE bits only
+ * work when the firmware reports ADDITIONAL_RSS_MODES in
+ * MC_CMD_GET_CAPABILITIES and override the _EN bits if any of them are not 0.
+ * See the RSS_MODE structure for the meaning of the mode bits.
+ */
 #define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4
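
Each _MODE field above is a 4-bit value inserted at its _LBN position; per the comment at the top of this structure, they should only be set when the firmware reports ADDITIONAL_RSS_MODES. A minimal sketch for the TCP/IPv4 field (mode being a hypothetical 4-bit RSS_MODE value):

#include <stdint.h>

static uint32_t set_tcp_ipv4_rss_mode(uint32_t flags, uint32_t mode)
{
	const unsigned int lbn =
		MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN;
	const uint32_t mask =
		((1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH) - 1)
		<< lbn;

	/* clear the old 4-bit field, then insert the new mode */
	return (flags & ~mask) | ((mode << lbn) & mask);
}
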
 
 /* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0
 
 /* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
-/* Hash control flags */
+/* Hash control flags. If any _MODE bits are non-zero (which will only be true
+ * when the firmware reports ADDITIONAL_RSS_MODES) then the _EN bits should be
+ * disregarded (but are guaranteed to be consistent with the _MODE bits if
+ * RSS_CONTEXT_SET_FLAGS has never been called for this context since it was
+ * allocated).
+ */
 #define       MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4
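
Following the rule in the comment above, a reader of this response should check the _MODE region before trusting the _EN bits. Since the mode fields occupy bits 8..31, starting at the TCP/IPv4 field's LBN, a minimal sketch of that check is:

#include <stdbool.h>
#include <stdint.h>

static bool rss_flags_use_mode_fields(uint32_t flags)
{
	/* any non-zero bit at or above the first _MODE field means the
	 * _EN bits should be disregarded
	 */
	return (flags >>
		MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN) != 0;
}
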
 
 
 /***********************************/
 
 /* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
 #define    MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
-/* The handle of the new .1p mapping */
+/* The handle of the new .1p mapping. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
 #define       MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+/* enum: guaranteed invalid .1p mapping handle value */
+#define          MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID  0xffffffff
 
 
 /***********************************/
 
 
 /***********************************/
-/* MC_CMD_RMON_RX_CLASS_STATS
- * Retrieve rmon rx class statistics
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS
+ * Add a MAC address to a v-port
  */
-#define MC_CMD_RMON_RX_CLASS_STATS 0xc3
-
-/* MC_CMD_RMON_RX_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_CLASS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_CLASS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_RX_CLASS_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_RX_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
 
-/***********************************/
-/* MC_CMD_RMON_TX_CLASS_STATS
- * Retrieve rmon tx class statistics
- */
-#define MC_CMD_RMON_TX_CLASS_STATS 0xc4
+#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
-/* MC_CMD_RMON_TX_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_CLASS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_CLASS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_TX_CLASS_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_TX_CLASS_STATS_IN_RST_WIDTH 1
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
+#define    MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to add */
+#define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
 
-/* MC_CMD_RMON_TX_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
+#define    MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
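
A minimal sketch of packing the 10-byte request above: the v-port handle is stored as a 32-bit little-endian word (an assumption consistent with the rest of this protocol), followed by the six raw MAC bytes.

#include <stdint.h>
#include <string.h>

static void build_vport_add_mac(uint8_t *buf, uint32_t vport_id,
				const uint8_t mac[6])
{
	unsigned int i;

	for (i = 0; i < 4; i++)	/* VPORT_ID at offset 0, little-endian */
		buf[MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST + i] =
			(vport_id >> (8 * i)) & 0xff;
	memcpy(buf + MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST, mac,
	       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN);
}
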
 
 
 /***********************************/
-/* MC_CMD_RMON_RX_SUPER_CLASS_STATS
- * Retrieve rmon rx super_class statistics
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS
+ * Delete a MAC address from a v-port
  */
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS 0xc5
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_LBN 4
-#define        MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
 
-
-/***********************************/
-/* MC_CMD_RMON_TX_SUPER_CLASS_STATS
- * Retrieve rmon tx super_class statistics
- */
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS 0xc6
-
-/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_LBN 4
-#define        MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS
- * Add qid to class for statistics collection
- */
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS 0xc7
-
-/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_LEN 12
-/* class */
-#define       MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
-/* qid */
-#define       MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_QID_OFST 4
-/* flags */
-#define       MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
-
-/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS
- * Add qid to class for statistics collection
- */
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS 0xc8
-
-/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_LEN 12
-/* class */
-#define       MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
-/* qid */
-#define       MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_QID_OFST 4
-/* flags */
-#define       MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
-
-/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS
- * Add qid to class for statistics collection
- */
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS 0xc9
-
-/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_LEN 12
-/* class */
-#define       MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
-/* qid */
-#define       MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_QID_OFST 4
-/* flags */
-#define       MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_LBN 8
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
-
-/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_ALLOC_CLASS
- * Allocate an rmon class
- */
-#define MC_CMD_RMON_ALLOC_CLASS 0xca
-
-/* MC_CMD_RMON_ALLOC_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_ALLOC_CLASS_IN_LEN 0
-
-/* MC_CMD_RMON_ALLOC_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_ALLOC_CLASS_OUT_LEN 4
-/* class */
-#define       MC_CMD_RMON_ALLOC_CLASS_OUT_CLASS_OFST 0
-
-
-/***********************************/
-/* MC_CMD_RMON_DEALLOC_CLASS
- * Deallocate an rmon class
- */
-#define MC_CMD_RMON_DEALLOC_CLASS 0xcb
-
-/* MC_CMD_RMON_DEALLOC_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_DEALLOC_CLASS_IN_LEN 4
-/* class */
-#define       MC_CMD_RMON_DEALLOC_CLASS_IN_CLASS_OFST 0
-
-/* MC_CMD_RMON_DEALLOC_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_DEALLOC_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_ALLOC_SUPER_CLASS
- * Allocate an rmon super_class
- */
-#define MC_CMD_RMON_ALLOC_SUPER_CLASS 0xcc
-
-/* MC_CMD_RMON_ALLOC_SUPER_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_ALLOC_SUPER_CLASS_IN_LEN 0
-
-/* MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_LEN 4
-/* super_class */
-#define       MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_SUPER_CLASS_OFST 0
-
-
-/***********************************/
-/* MC_CMD_RMON_DEALLOC_SUPER_CLASS
- * Deallocate an rmon tx super_class
- */
-#define MC_CMD_RMON_DEALLOC_SUPER_CLASS 0xcd
-
-/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_LEN 4
-/* super_class */
-#define       MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_SUPER_CLASS_OFST 0
-
-/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_UP_CONV_STATS
- * Retrieve up converter statistics
- */
-#define MC_CMD_RMON_RX_UP_CONV_STATS 0xce
-
-/* MC_CMD_RMON_RX_UP_CONV_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_UP_CONV_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_UP_CONV_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_LBN 0
-#define        MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_WIDTH 2
-#define        MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_LBN 2
-#define        MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_UP_CONV_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPI_STATS
- * Retrieve rx ipi stats
- */
-#define MC_CMD_RMON_RX_IPI_STATS 0xcf
-
-/* MC_CMD_RMON_RX_IPI_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_IPI_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_IPI_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_LBN 0
-#define        MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_WIDTH 5
-#define        MC_CMD_RMON_RX_IPI_STATS_IN_RST_LBN 5
-#define        MC_CMD_RMON_RX_IPI_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPI_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_IPI_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_IPI_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_IPI_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS
- * Retrieve rx ipsec cntxt_ptr indexed stats
- */
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS 0xd0
-
-/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
-#define        MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
-#define        MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
-#define        MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPSEC_PORT_STATS
- * Retrieve rx ipsec port indexed stats
- */
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS 0xd1
-
-/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_LBN 0
-#define        MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
-#define        MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_LBN 2
-#define        MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS
- * Retrieve tx ipsec overflow
- */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS 0xd2
-
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
-#define        MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
-#define        MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
-#define        MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_VPORT_ADD_MAC_ADDRESS
- * Add a MAC address to a v-port
- */
-#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
-
-#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
-#define    MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
-/* The handle of the v-port */
-#define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
-/* MAC address to add */
-#define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
-#define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
-
-/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
-#define    MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_VPORT_DEL_MAC_ADDRESS
- * Delete a MAC address from a v-port
- */
-#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
-
-#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
 #define    MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
 /* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
 #define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
 
 
-/***********************************/
-/* MC_CMD_RMON_RX_CLASS_DROPS_STATS
- * Retrieve rx class drop stats
- */
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS 0xd3
-
-/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS
- * Retrieve rx super class drop stats
- */
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS 0xd4
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_LBN 4
-#define        MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_ERRORS_STATS
- * Retrieve rxdp errors
- */
-#define MC_CMD_RMON_RX_ERRORS_STATS 0xd5
-
-/* MC_CMD_RMON_RX_ERRORS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_ERRORS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_ERRORS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_LBN 0
-#define        MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_WIDTH 11
-#define        MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_LBN 11
-#define        MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_ERRORS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_OVERFLOW_STATS
- * Retrieve rxdp overflow
- */
-#define MC_CMD_RMON_RX_OVERFLOW_STATS 0xd6
-
-/* MC_CMD_RMON_RX_OVERFLOW_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_OVERFLOW_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_OVERFLOW_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_OVERFLOW_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPI_STATS
- * Retrieve tx ipi stats
- */
-#define MC_CMD_RMON_TX_IPI_STATS 0xd7
-
-/* MC_CMD_RMON_TX_IPI_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_IPI_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_IPI_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_LBN 0
-#define        MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_WIDTH 5
-#define        MC_CMD_RMON_TX_IPI_STATS_IN_RST_LBN 5
-#define        MC_CMD_RMON_TX_IPI_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPI_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_IPI_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_IPI_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_IPI_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS
- * Retrieve tx ipsec counters by cntxt_ptr
- */
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS 0xd8
-
-/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
-#define        MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
-#define        MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
-#define        MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPSEC_PORT_STATS
- * Retrieve tx ipsec counters by port
- */
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS 0xd9
-
-/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_LBN 0
-#define        MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
-#define        MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_LBN 2
-#define        MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS
- * Retrieve tx ipsec overflow
- */
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS 0xda
-
-/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
-#define        MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
-#define        MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
-#define        MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_NOWHERE_STATS
- * Retrieve tx nowhere stats
- */
-#define MC_CMD_RMON_TX_NOWHERE_STATS 0xdb
-
-/* MC_CMD_RMON_TX_NOWHERE_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_NOWHERE_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_NOWHERE_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_NOWHERE_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS
- * Retrieve tx nowhere qbb stats
- */
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS 0xdc
-
-/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_LBN 0
-#define        MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_WIDTH 3
-#define        MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_LBN 3
-#define        MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_ERRORS_STATS
- * Retrieve rxdp errors
- */
-#define MC_CMD_RMON_TX_ERRORS_STATS 0xdd
-
-/* MC_CMD_RMON_TX_ERRORS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_ERRORS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_ERRORS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_LBN 0
-#define        MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_WIDTH 11
-#define        MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_LBN 11
-#define        MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_ERRORS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_OVERFLOW_STATS
- * Retrieve rxdp overflow
- */
-#define MC_CMD_RMON_TX_OVERFLOW_STATS 0xde
-
-/* MC_CMD_RMON_TX_OVERFLOW_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_OVERFLOW_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_OVERFLOW_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_OVERFLOW_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_COLLECT_CLASS_STATS
- * Explicitly collect class stats at the specified evb port
- */
-#define MC_CMD_RMON_COLLECT_CLASS_STATS 0xdf
-
-/* MC_CMD_RMON_COLLECT_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_COLLECT_CLASS_STATS_IN_LEN 4
-/* The port id associated with the vport/pport at which to collect class stats
- */
-#define       MC_CMD_RMON_COLLECT_CLASS_STATS_IN_PORT_ID_OFST 0
-
-/* MC_CMD_RMON_COLLECT_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_LEN 4
-/* class */
-#define       MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_CLASS_OFST 0
-
-
-/***********************************/
-/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS
- * Explicitly collect class stats at the specified evb port
- */
-#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS 0xe0
-
-/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_LEN 4
-/* The port id associated with the vport/pport at which to collect class stats
- */
-#define       MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_PORT_ID_OFST 0
-
-/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_LEN 4
-/* super_class */
-#define       MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_SUPER_CLASS_OFST 0
-
-
 /***********************************/
 /* MC_CMD_GET_CLOCK
  * Return the system and PDCPU clock frequencies.
 #define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_SET_CLOCK_IN msgrequest */
-#define    MC_CMD_SET_CLOCK_IN_LEN 12
-/* Requested system frequency in MHz; 0 leaves unchanged. */
+#define    MC_CMD_SET_CLOCK_IN_LEN 28
+/* Requested frequency in MHz for system clock domain */
 #define       MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
-/* Requested inter-core frequency in MHz; 0 leaves unchanged. */
+/* enum: Leave the system clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for inter-core clock domain */
 #define       MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
-/* Request DPCPU frequency in MHz; 0 leaves unchanged. */
+/* enum: Leave the inter-core clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for DPCPU clock domain */
 #define       MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+/* enum: Leave the DPCPU clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for PCS clock domain */
+#define       MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
+/* enum: Leave the PCS clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for MC clock domain */
+#define       MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
+/* enum: Leave the MC clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for rmon clock domain */
+#define       MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
+/* enum: Leave the rmon clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for vswitch clock domain */
+#define       MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
+/* enum: Leave the vswitch clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE  0x0
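
Because every *_DOMAIN_DONT_CHANGE value above is 0, a caller can zero-fill the 28-byte request and then write only the domains it wants to retune. A minimal sketch that changes just the system clock (little-endian field layout is assumed):

#include <stdint.h>
#include <string.h>

static void build_set_clock_sys_only(uint8_t *buf, uint32_t sys_freq_mhz)
{
	unsigned int i;

	memset(buf, 0, MC_CMD_SET_CLOCK_IN_LEN);	/* 0 == DONT_CHANGE */
	for (i = 0; i < 4; i++)
		buf[MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST + i] =
			(sys_freq_mhz >> (8 * i)) & 0xff;
}
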
 
 /* MC_CMD_SET_CLOCK_OUT msgresponse */
-#define    MC_CMD_SET_CLOCK_OUT_LEN 12
+#define    MC_CMD_SET_CLOCK_OUT_LEN 28
 /* Resulting system frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* enum: The system clock domain doesn't exist */
+#define          MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED  0x0
 /* Resulting inter-core frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+/* enum: The inter-core clock domain doesn't exist / isn't used */
+#define          MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED  0x0
 /* Resulting DPCPU frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+/* enum: The dpcpu clock domain doesn't exist */
+#define          MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED  0x0
+/* Resulting PCS frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
+/* enum: The PCS clock domain doesn't exist / isn't controlled */
+#define          MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED  0x0
+/* Resulting MC frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
+/* enum: The MC clock domain doesn't exist / isn't controlled */
+#define          MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED  0x0
+/* Resulting rmon frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
+/* enum: The rmon clock domain doesn't exist / isn't controlled */
+#define          MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED  0x0
+/* Resulting vswitch frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
+/* enum: The vswitch clock domain doesn't exist / isn't controlled */
+#define          MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED  0x0
 
 
 /***********************************/
 /* MC_CMD_DPCPU_RPC_IN msgrequest */
 #define    MC_CMD_DPCPU_RPC_IN_LEN 36
 #define       MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
-/* enum: RxDPCPU */
-#define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX   0x0
+/* enum: RxDPCPU0 */
+#define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX0  0x0
 /* enum: TxDPCPU0 */
 #define          MC_CMD_DPCPU_RPC_IN_DPCPU_TX0  0x1
 /* enum: TxDPCPU1 */
 #define          MC_CMD_DPCPU_RPC_IN_DPCPU_TX1  0x2
+/* enum: RxDPCPU1 (Medford only) */
+#define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX1   0x3
+/* enum: RxDPCPU (will select the RxDPCPU for the calling function; for now,
+ * just an alias of DPCPU_RX0)
+ */
+#define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX   0x80
+/* enum: TxDPCPU (will select the TxDPCPU for the calling function; for now,
+ * just an alias of DPCPU_TX0)
+ */
+#define          MC_CMD_DPCPU_RPC_IN_DPCPU_TX   0x81
 /* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
  * initialised to zero
  */
 #define    MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
 
 
+/***********************************/
+/* MC_CMD_SHMBOOT_OP
+ * Special operations to support (for now) shmboot.
+ */
+#define MC_CMD_SHMBOOT_OP 0xe6
+
+#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SHMBOOT_OP_IN msgrequest */
+#define    MC_CMD_SHMBOOT_OP_IN_LEN 4
+/* Identifies the operation to perform */
+#define       MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+/* enum: Copy slave_data section to the slave core. (Greenport only) */
+#define          MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA  0x0
+
+/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
+#define    MC_CMD_SHMBOOT_OP_OUT_LEN 0
+
+
 /***********************************/
 /* MC_CMD_CAP_BLK_READ
  * Read multiple 64bit words from capture block memory
  * more data is returned.
  */
 #define          MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT  0x6
+/* enum: Read Figure Of Merit (eye quality, higher is better). */
+#define          MC_CMD_KR_TUNE_IN_READ_FOM  0x7
 /* Align the arguments to 32 bits */
 #define       MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
 #define       MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
 #define       MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: Attenuation (0-15) */
+/* enum: Attenuation (0-15, TBD for Medford) */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT  0x0
-/* enum: CTLE Boost (0-15) */
+/* enum: CTLE Boost (0-15, TBD for Medford) */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST  0x1
-/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
+/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive, TBD
+ * for Medford)
+ */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1  0x2
-/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2  0x3
-/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3  0x4
-/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4  0x5
-/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5  0x6
+/* enum: Edge DFE DLEV (TBD for Medford) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV  0x7
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0  0x0 /* enum */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY  0x7
 /* enum: TX Slew Rate Fine control */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET  0x8
+/* enum: TX Termination Impedance control */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET  0x9
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0  0x0 /* enum */
 #define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
 #define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
 
+/* MC_CMD_KR_TUNE_READ_FOM_IN msgrequest */
+#define    MC_CMD_KR_TUNE_READ_FOM_IN_LEN 8
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
+
+/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */
+#define    MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4
+#define       MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0
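
The new READ_FOM sub-operation follows the usual KR_TUNE framing: the sub-op in byte 0, three reserved padding bytes, then the lane as a dword. An illustrative sketch, with the same sfc MCDI helper assumptions as the SET_CLOCK sketch above; the MC_CMD_KR_TUNE command number is assumed from earlier in this header and the function name is hypothetical:

static int efx_kr_tune_read_fom(struct efx_nic *efx, u32 lane, u32 *fom)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_KR_TUNE_READ_FOM_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_KR_TUNE_READ_FOM_OUT_LEN);
        size_t outlen;
        int rc;

        /* Sub-operation selector is one byte; the RSVD bytes stay zero */
        *MCDI_PTR(inbuf, KR_TUNE_READ_FOM_IN_KR_TUNE_OP) =
                MC_CMD_KR_TUNE_IN_READ_FOM;
        MCDI_SET_DWORD(inbuf, KR_TUNE_READ_FOM_IN_LANE, lane);
        rc = efx_mcdi_rpc(efx, MC_CMD_KR_TUNE, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_KR_TUNE_READ_FOM_OUT_LEN)
                return -EIO;
        /* Higher figure of merit = better eye quality */
        *fom = MCDI_DWORD(outbuf, KR_TUNE_READ_FOM_OUT_FOM);
        return 0;
}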
+
 
 /***********************************/
 /* MC_CMD_PCIE_TUNE
 #define       MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
 /* enum: validate application */
 #define          MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE  0x0
+/* enum: mask application */
+#define          MC_CMD_LICENSED_APP_OP_IN_OP_MASK  0x1
 /* arguments specific to this particular operation */
 #define       MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
 #define       MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
 
+/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
+#define    MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
+/* application ID */
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+/* flag */
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+
+/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
+#define    MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
+
 
 /***********************************/
 /* MC_CMD_SET_PORT_SNIFF_CONFIG
- * Configure port sniffing for the physical port associated with the calling
+ * Configure RX port sniffing for the physical port associated with the calling
  * function. Only a privileged function may change the port sniffing
  * configuration. A copy of all traffic delivered to the host (non-promiscuous
  * mode) or all traffic arriving at the port (promiscuous mode) may be
 
 /***********************************/
 /* MC_CMD_GET_PORT_SNIFF_CONFIG
- * Obtain the current port sniffing configuration for the physical port
+ * Obtain the current RX port sniffing configuration for the physical port
  * associated with the calling function. Only a privileged function may read
  * the configuration.
  */
 #define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
 
 
+/***********************************/
+/* MC_CMD_SET_PARSER_DISP_CONFIG
+ * Change configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9
+
+#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
+/* the type of configuration setting to change */
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+/* enum: Per-TXQ enable for multicast UDP destination lookup for possible
+ * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
+ */
+#define          MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN  0x0
+/* enum: Per-v-adaptor enable for suppression of self-transmissions on the
+ * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single
+ * boolean.)
+ */
+#define          MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX  0x1
+/* handle for the entity to update: queue handle, EVB port ID, etc. depending
+ * on the type of configuration setting being changed
+ */
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+/* new value: the details depend on the type of configuration setting being
+ * changed
+ */
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0
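
Since TYPE determines how ENTITY and VALUE are interpreted, a single-boolean setting uses the LENMIN form with one VALUE word. A sketch for the per-TXQ multicast-UDP lookup enable (same helper assumptions as the sketches above; function name hypothetical):

static int efx_txq_mcast_udp_lookup(struct efx_nic *efx, u32 txq_handle,
                                    bool enable)
{
        /* LEN(1) == LENMIN (12): TYPE + ENTITY + one VALUE word */
        MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(1));

        MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_TYPE,
                       MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN);
        /* For this TYPE, ENTITY is a TX queue handle */
        MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_ENTITY, txq_handle);
        MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_VALUE, enable ? 1 : 0);
        return efx_mcdi_rpc(efx, MC_CMD_SET_PARSER_DISP_CONFIG, inbuf,
                            sizeof(inbuf), NULL, 0, NULL);
}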
+
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_CONFIG
+ * Read configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
+
+#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
+/* the type of configuration setting to read */
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+/*            Enum values, see field(s): */
+/*               MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
+/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
+ * the type of configuration setting being read
+ */
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
+/* current value: the details depend on the type of configuration setting being
+ * read
+ */
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG
+ * Configure TX port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic transmitted through the port may be
+ * delivered to a specific queue, or a set of queues with RSS. Note that these
+ * packets are delivered with transmit timestamps in the packet prefix, not
+ * receive timestamps, so it is likely that the queue(s) will need to be
+ * dedicated as TX sniff receivers.
+ */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb
+
+#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define    MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define        MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define        MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+/* receive mode */
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+/* enum: receive to just the specified queue */
+#define          MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE  0x0
+/* enum: receive to multiple queues using RSS context */
+#define          MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS  0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define    MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
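
Enabling TX sniffing to a single queue then looks like the sketch below (same helper assumptions; function name hypothetical). In RX_MODE_SIMPLE no RSS context is used, so the guaranteed-invalid handle 0xFFFFFFFF makes a safe filler:

static int efx_set_tx_sniff(struct efx_nic *efx, bool enable, u32 rx_queue)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN);

        MCDI_POPULATE_DWORD_1(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS,
                              SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE,
                              enable ? 1 : 0);
        MCDI_SET_DWORD(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE, rx_queue);
        MCDI_SET_DWORD(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE,
                       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
        /* 0xFFFFFFFF is never a valid RSS context handle */
        MCDI_SET_DWORD(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT,
                       0xffffffff);
        return efx_mcdi_rpc(efx, MC_CMD_SET_TX_PORT_SNIFF_CONFIG, inbuf,
                            sizeof(inbuf), NULL, 0, NULL);
}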
+
+
+/***********************************/
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG
+ * Obtain the current TX port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
+
+#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define    MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define    MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define        MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define        MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define          MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE  0x0
+/* enum: receiving to multiple queues using RSS context */
+#define          MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS  0x1
+/* RSS context (for RX_MODE_RSS) */
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_RMON_STATS_RX_ERRORS
+ * Per-queue RX error statistics.
+ */
+#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe
+
+#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */
+#define    MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
+/* The rx queue to get stats for. */
+#define       MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define       MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define        MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
+#define        MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
+#define    MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_GET_PCIE_RESOURCE_INFO
+ * Find out about available PCIE resources
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
+#define    MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */
+#define    MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
+/* The maximum number of PFs the device can expose */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+/* The maximum number of VFs the device can expose in total */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+/* The maximum number of MSI-X vectors the device can provide in total */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+/* the number of MSI-X vectors the device will allocate by default to each PF
+ */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+/* the number of MSI-X vectors the device will allocate by default to each VF
+ */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+/* the maximum number of MSI-X vectors the device can allocate to any one PF */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+/* the maximum number of MSI-X vectors the device can allocate to any one VF */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
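
A query with no request payload; this sketch pulls out just the VF limit (same helper assumptions as the sketches above; function name hypothetical):

static int efx_get_pcie_vf_limit(struct efx_nic *efx, u32 *max_vfs)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN);
        size_t outlen;
        int rc;

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_PCIE_RESOURCE_INFO, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN)
                return -EIO;
        *max_vfs = MCDI_DWORD(outbuf, GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS);
        return 0;
}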
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_MODES
+ * Find out about available port modes
+ */
+#define MC_CMD_GET_PORT_MODES 0xff
+
+#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_MODES_IN msgrequest */
+#define    MC_CMD_GET_PORT_MODES_IN_LEN 0
+
+/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
+#define    MC_CMD_GET_PORT_MODES_OUT_LEN 12
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
+#define       MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+/* Default (canonical) board mode */
+#define       MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+/* Current board mode */
+#define       MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+
+
+/***********************************/
+/* MC_CMD_READ_ATB
+ * Sample voltages on the ATB
+ */
+#define MC_CMD_READ_ATB 0x100
+
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_ATB_IN msgrequest */
+#define    MC_CMD_READ_ATB_IN_LEN 16
+#define       MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
+#define          MC_CMD_READ_ATB_IN_BUS_CCOM  0x0 /* enum */
+#define          MC_CMD_READ_ATB_IN_BUS_CKR  0x1 /* enum */
+#define          MC_CMD_READ_ATB_IN_BUS_CPCIE  0x8 /* enum */
+#define       MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define       MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define       MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+
+/* MC_CMD_READ_ATB_OUT msgresponse */
+#define    MC_CMD_READ_ATB_OUT_LEN 4
+#define       MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+
+#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define    MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define       MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define       MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+/* enum: Bug 17230 work around. */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20
+/* enum: Bug 26807 features present in firmware (multicast filter chaining) */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40
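
Testing an individual workaround bit is then a bitwise AND against ENABLED (or IMPLEMENTED). A sketch with the same helper assumptions as above, function name hypothetical; the BUG42008 caveat still applies, since older Monza firmware cannot report that bit at all:

static bool efx_workaround_26807_enabled(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
        size_t outlen;
        int rc;

        /* No request payload; the command only has a response */
        rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc || outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN)
                return false;
        return !!(MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED) &
                  MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
}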
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MASK
+ * Read/set privileges of an arbitrary PCIe function
+ */
+#define MC_CMD_PRIVILEGE_MASK 0x5a
+
+#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */
+#define    MC_CMD_PRIVILEGE_MASK_IN_LEN 8
+/* The target function to have its mask read or set, e.g. PF 0 = 0xFFFF0000, VF
+ * 1,3 = 0x00030001
+ */
+#define       MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16
+#define          MC_CMD_PRIVILEGE_MASK_IN_VF_NULL  0xffff /* enum */
+/* New privilege mask to be set. The mask will only be changed if the MSB is
+ * set to 1.
+ */
+#define       MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN             0x1 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK              0x2 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD            0x4 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP               0x8 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS  0x10 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING      0x20 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST           0x40 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST         0x80 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST         0x100 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST     0x200 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS       0x400 /* enum */
+/* enum: Set this bit to indicate that a new privilege mask is to be set,
+ * otherwise the command will only read the existing mask.
+ */
+#define          MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE             0x80000000
+
+/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */
+#define    MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
+/* For an admin function, all privileges are always reported. */
+#define       MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
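
Because the mask is only written when the DO_CHANGE MSB is set, leaving NEW_MASK at zero turns the command into a pure read. A sketch that reads a PF's privileges (same helper assumptions; function name hypothetical):

static int efx_get_pf_privileges(struct efx_nic *efx, u16 pf, u32 *mask)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_PRIVILEGE_MASK_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_PRIVILEGE_MASK_OUT_LEN);
        size_t outlen;
        int rc;

        /* VF_NULL in the VF half addresses the PF itself */
        MCDI_POPULATE_DWORD_2(inbuf, PRIVILEGE_MASK_IN_FUNCTION,
                              PRIVILEGE_MASK_IN_FUNCTION_PF, pf,
                              PRIVILEGE_MASK_IN_FUNCTION_VF,
                              MC_CMD_PRIVILEGE_MASK_IN_VF_NULL);
        /* NEW_MASK stays zero: DO_CHANGE is clear, so nothing is written */
        rc = efx_mcdi_rpc(efx, MC_CMD_PRIVILEGE_MASK, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_PRIVILEGE_MASK_OUT_LEN)
                return -EIO;
        *mask = MCDI_DWORD(outbuf, PRIVILEGE_MASK_OUT_OLD_MASK);
        return 0;
}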
+
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define    MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set; it must be a
+ * VF, e.g. VF 1,3 = 0x00030001
+ */
+#define       MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define       MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO       0x0 /* enum */
+#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP         0x1 /* enum */
+#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN       0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define          MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE         0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define    MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define       MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
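
The DO_NOT_CHANGE sentinel lets the read and write paths share one command. Reading a VF's current mode (same helper assumptions as the sketches above; function name hypothetical):

static int efx_get_vf_link_state(struct efx_nic *efx, u16 pf, u16 vf,
                                 u32 *mode)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_LINK_STATE_MODE_OUT_LEN);
        size_t outlen;
        int rc;

        MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION,
                              LINK_STATE_MODE_IN_FUNCTION_PF, pf,
                              LINK_STATE_MODE_IN_FUNCTION_VF, vf);
        MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE,
                       MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE);
        rc = efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_LINK_STATE_MODE_OUT_LEN)
                return -EIO;
        *mode = MCDI_DWORD(outbuf, LINK_STATE_MODE_OUT_OLD_MODE);
        return 0;
}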
+
+
+/***********************************/
+/* MC_CMD_GET_SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
+ * parameter to MC_CMD_INIT_RXQ.
+ */
+#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
+
+#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */
+#define    MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */
+#define    MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
+/* Minimum acceptable snapshot length. */
+#define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+/* Maximum acceptable snapshot length. */
+#define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+
+
+/***********************************/
+/* MC_CMD_FUSE_DIAGS
+ * Additional fuse diagnostics
+ */
+#define MC_CMD_FUSE_DIAGS 0x102
+
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_FUSE_DIAGS_IN msgrequest */
+#define    MC_CMD_FUSE_DIAGS_IN_LEN 0
+
+/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
+#define    MC_CMD_FUSE_DIAGS_OUT_LEN 48
+/* Total number of mismatched bits between pairs in area 0 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+/* Checksum of data after logical OR of pairs in area 0 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+/* Total number of mismatched bits between pairs in area 1 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+/* Checksum of data after logical OR of pairs in area 1 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+/* Total number of mismatched bits between pairs in area 2 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+/* Checksum of data after logical OR of pairs in area 2 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MODIFY
+ * Modify the privileges of a set of PCIe functions. Note that this operation
+ * only affects non-admin functions unless the admin privilege itself is
+ * included in one of the masks provided.
+ */
+#define MC_CMD_PRIVILEGE_MODIFY 0x60
+
+#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
+#define    MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
+/* The groups of functions to have their privilege masks modified. */
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_NONE       0x0 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_ALL        0x1 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY   0x2 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY   0x3 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF  0x4 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_ONE        0x5 /* enum */
+/* For VFS_OF_PF specify the PF, for ONE specify the target function */
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
+/* Privileges to be added to the target functions. For privilege definitions
+ * refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+/* Privileges to be removed from the target functions. For privilege
+ * definitions refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+
+/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
+#define    MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_BYTES
+ * Read XPM memory
+ */
+#define MC_CMD_XPM_READ_BYTES 0x103
+
+#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_BYTES_IN msgrequest */
+#define    MC_CMD_XPM_READ_BYTES_IN_LEN 8
+/* Start address (byte) */
+#define       MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define       MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
+#define    MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
+#define    MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252
+#define    MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num))
+/* Data */
+#define       MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0
+#define       MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1
+#define       MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0
+#define       MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252
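
Since an MCDI response carries at most 252 data bytes here, larger reads must be chunked. A sketch (same helper assumptions; function name hypothetical):

static int efx_xpm_read(struct efx_nic *efx, u32 addr, u8 *buf, size_t len)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_XPM_READ_BYTES_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_XPM_READ_BYTES_OUT_LENMAX);
        size_t outlen, chunk;
        int rc;

        while (len) {
                /* Response payload is capped at DATA_MAXNUM bytes per call */
                chunk = min_t(size_t, len,
                              MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM);
                MCDI_SET_DWORD(inbuf, XPM_READ_BYTES_IN_ADDR, addr);
                MCDI_SET_DWORD(inbuf, XPM_READ_BYTES_IN_COUNT, chunk);
                rc = efx_mcdi_rpc(efx, MC_CMD_XPM_READ_BYTES, inbuf,
                                  sizeof(inbuf), outbuf, sizeof(outbuf),
                                  &outlen);
                if (rc)
                        return rc;
                if (outlen < chunk)
                        return -EIO;
                memcpy(buf, MCDI_PTR(outbuf, XPM_READ_BYTES_OUT_DATA), chunk);
                addr += chunk;
                buf += chunk;
                len -= chunk;
        }
        return 0;
}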
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_BYTES
+ * Write XPM memory
+ */
+#define MC_CMD_XPM_WRITE_BYTES 0x104
+
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
+#define    MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
+#define    MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252
+#define    MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
+/* Start address (byte) */
+#define       MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define       MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+/* Data */
+#define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
+#define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
+#define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0
+#define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244
+
+/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */
+#define    MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_SECTOR
+ * Read XPM sector
+ */
+#define MC_CMD_XPM_READ_SECTOR 0x105
+
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
+#define    MC_CMD_XPM_READ_SECTOR_IN_LEN 8
+/* Sector index */
+#define       MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+/* Sector size */
+#define       MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+
+/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
+#define    MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
+#define    MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36
+#define    MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
+/* Sector type */
+#define       MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
+#define          MC_CMD_XPM_READ_SECTOR_OUT_BLANK            0x0 /* enum */
+#define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128   0x1 /* enum */
+#define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256   0x2 /* enum */
+#define          MC_CMD_XPM_READ_SECTOR_OUT_INVALID          0xff /* enum */
+/* Sector data */
+#define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
+#define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
+#define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0
+#define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_SECTOR
+ * Write XPM sector
+ */
+#define MC_CMD_XPM_WRITE_SECTOR 0x106
+
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
+#define    MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
+#define    MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44
+#define    MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num))
+/* If writing fails due to an uncorrectable error, try up to RETRIES following
+ * sectors (or until no more space available). If 0, only one write attempt is
+ * made. Note that uncorrectable errors are unlikely, thanks to the XPM
+ * self-repair mechanism.
+ */
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
+/* Sector type */
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
+/* Sector size */
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+/* Sector data */
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32
+
+/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */
+#define    MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
+/* New sector index */
+#define       MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
+
+
+/***********************************/
+/* MC_CMD_XPM_INVALIDATE_SECTOR
+ * Invalidate XPM sector
+ */
+#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
+
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
+#define    MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
+/* Sector index */
+#define       MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
+#define    MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_BLANK_CHECK
+ * Blank-check XPM memory and report bad locations
+ */
+#define MC_CMD_XPM_BLANK_CHECK 0x108
+
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
+#define    MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
+/* Start address (byte) */
+#define       MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define       MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
+#define    MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
+#define    MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252
+#define    MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
+/* Total number of bad (non-blank) locations */
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+/* Addresses of bad locations (may be fewer than BAD_COUNT if they do not all
+ * fit into the MCDI response)
+ */
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124
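
Because BAD_ADDR can be truncated by the response size cap, BAD_COUNT is the only reliable total; a caller wanting every address should re-check narrower ranges. A count-only sketch (same helper assumptions; function name hypothetical):

static int efx_xpm_count_bad(struct efx_nic *efx, u32 addr, u32 count,
                             u32 *bad_count)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_XPM_BLANK_CHECK_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX);
        size_t outlen;
        int rc;

        MCDI_SET_DWORD(inbuf, XPM_BLANK_CHECK_IN_ADDR, addr);
        MCDI_SET_DWORD(inbuf, XPM_BLANK_CHECK_IN_COUNT, count);
        rc = efx_mcdi_rpc(efx, MC_CMD_XPM_BLANK_CHECK, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN)
                return -EIO;
        /* BAD_ADDR may be truncated; BAD_COUNT is always the full total */
        *bad_count = MCDI_DWORD(outbuf, XPM_BLANK_CHECK_OUT_BAD_COUNT);
        return 0;
}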
+
+
+/***********************************/
+/* MC_CMD_XPM_REPAIR
+ * Blank-check and repair XPM memory
+ */
+#define MC_CMD_XPM_REPAIR 0x109
+
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_REPAIR_IN msgrequest */
+#define    MC_CMD_XPM_REPAIR_IN_LEN 8
+/* Start address (byte) */
+#define       MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define       MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_REPAIR_OUT msgresponse */
+#define    MC_CMD_XPM_REPAIR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_DECODER_TEST
+ * Test XPM memory address decoders for gross manufacturing defects. Can only
+ * be performed on an unprogrammed part.
+ */
+#define MC_CMD_XPM_DECODER_TEST 0x10a
+
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
+#define    MC_CMD_XPM_DECODER_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */
+#define    MC_CMD_XPM_DECODER_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_TEST
+ * XPM memory write test. Test XPM write logic for gross manufacturing defects
+ * by writing to a dedicated test row. There are 16 locations in the test row
+ * and the test can only be performed on locations that have not been
+ * previously used (i.e. can be run at most 16 times). The test will pick the
+ * first available location to use, or fail with ENOSPC if none are left.
+ */
+#define MC_CMD_XPM_WRITE_TEST 0x10b
+
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
+#define    MC_CMD_XPM_WRITE_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */
+#define    MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
+
+
 #endif /* MCDI_PCOL_H */
index 47d1e3a96522668a1cf1c80cacd141d2afbf1193..4d35313a239db77d0d90a273e1d02aa6dfaabd81 100644
@@ -925,6 +925,7 @@ struct vfdi_status;
  * @stats_lock: Statistics update lock. Must be held when calling
  *     efx_nic_type::{update,start,stop}_stats.
  * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb
+ * @mc_promisc: Whether in multicast promiscuous mode when last changed
  *
  * This is stored in the private area of the &struct net_device.
  */
@@ -1072,6 +1073,7 @@ struct efx_nic {
        int last_irq_cpu;
        spinlock_t stats_lock;
        atomic_t n_rx_noskb_drops;
+       bool mc_promisc;
 };
 
 static inline int efx_dev_registered(struct efx_nic *efx)
index 31ff9084d9a46624d3a2350a10fdddfc18bbfc08..0b536e27d3b2291f0af62c4d51b005b0cfec8a72 100644
@@ -506,6 +506,7 @@ enum {
  * @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared
  * @stats: Hardware statistics
  * @workaround_35388: Flag: firmware supports workaround for bug 35388
+ * @workaround_26807: Flag: firmware supports workaround for bug 26807
  * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
  *     after MC reboot
  * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
@@ -535,6 +536,7 @@ struct efx_ef10_nic_data {
        bool rx_rss_context_exclusive;
        u64 stats[EF10_STAT_COUNT];
        bool workaround_35388;
+       bool workaround_26807;
        bool must_check_datapath_caps;
        u32 datapath_caps;
        unsigned int rx_dpcpu_fw_id;
index b605dfd5c7bc7146600908b1c7a40f79b28dfd53..9d78830da6097ff1e6a42873f580734c7f970180 100644
@@ -114,7 +114,10 @@ static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
 
        if (efx->type->test_nvram) {
                rc = efx->type->test_nvram(efx);
-               tests->nvram = rc ? -1 : 1;
+               if (rc == -EPERM)
+                       rc = 0;
+               else
+                       tests->nvram = rc ? -1 : 1;
        }
 
        return rc;
@@ -253,6 +256,12 @@ static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
        mutex_lock(&efx->mac_lock);
        rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
        mutex_unlock(&efx->mac_lock);
+       if (rc == -EPERM)
+               rc = 0;
+       else
+               netif_info(efx, drv, efx->net_dev,
+                          "%s phy selftest\n", rc ? "Failed" : "Passed");
+
        return rc;
 }
 
@@ -661,6 +670,9 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
        wmb();
        kfree(state);
 
+       if (rc == -EPERM)
+               rc = 0;
+
        return rc;
 }
 
index b323b9167526f6f48da1e13da8fbc54fd4daa9e5..b2f886d9042976956aee1f2d7c8ba84751ae7de4 100644
@@ -1042,9 +1042,5 @@ const struct efx_nic_type siena_a0_nic_type = {
        .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
        .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE |
                             1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
-                            1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
-                            1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
-                            1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
-                            1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
-                            1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ),
+                            1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT),
 };
index e817a1a4437927d8976fbfe64c4408f5c0fc5c52..b1e5f24708c923d5b9f4f54924bc1f8c34f64496 100644
 #include "stmmac.h"
 #include "stmmac_platform.h"
 
+static int dwmac_generic_probe(struct platform_device *pdev)
+{
+       struct plat_stmmacenet_data *plat_dat;
+       struct stmmac_resources stmmac_res;
+       int ret;
+
+       ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+       if (ret)
+               return ret;
+
+       if (pdev->dev.of_node) {
+               plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+               if (IS_ERR(plat_dat)) {
+                       dev_err(&pdev->dev, "dt configuration failed\n");
+                       return PTR_ERR(plat_dat);
+               }
+       } else {
+               plat_dat = dev_get_platdata(&pdev->dev);
+               if (!plat_dat) {
+                       dev_err(&pdev->dev, "no platform data provided\n");
+                       return -EINVAL;
+               }
+
+               /* Set default value for multicast hash bins */
+               plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+               /* Set default value for unicast filter entries */
+               plat_dat->unicast_filter_entries = 1;
+       }
+
+       /* Custom initialisation (if needed) */
+       if (plat_dat->init) {
+               ret = plat_dat->init(pdev, plat_dat->bsp_priv);
+               if (ret)
+                       return ret;
+       }
+
+       return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
+
 static const struct of_device_id dwmac_generic_match[] = {
        { .compatible = "st,spear600-gmac"},
        { .compatible = "snps,dwmac-3.610"},
@@ -27,7 +67,7 @@ static const struct of_device_id dwmac_generic_match[] = {
 MODULE_DEVICE_TABLE(of, dwmac_generic_match);
 
 static struct platform_driver dwmac_generic_driver = {
-       .probe  = stmmac_pltfr_probe,
+       .probe  = dwmac_generic_probe,
        .remove = stmmac_pltfr_remove,
        .driver = {
                .name           = STMMAC_RESOURCE_NAME,
index 7e3129e7f143a9990c89780a8e6638f8182e4892..333489f0fd24d80ec5d09584b4cbb489eb1a880d 100644
@@ -248,23 +248,40 @@ static void *ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
        return NULL;
 }
 
-static void *ipq806x_gmac_setup(struct platform_device *pdev)
+static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
+{
+       struct ipq806x_gmac *gmac = priv;
+
+       ipq806x_gmac_set_speed(gmac, speed);
+}
+
+static int ipq806x_gmac_probe(struct platform_device *pdev)
 {
+       struct plat_stmmacenet_data *plat_dat;
+       struct stmmac_resources stmmac_res;
        struct device *dev = &pdev->dev;
        struct ipq806x_gmac *gmac;
        int val;
        void *err;
 
+       val = stmmac_get_platform_resources(pdev, &stmmac_res);
+       if (val)
+               return val;
+
+       plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+       if (IS_ERR(plat_dat))
+               return PTR_ERR(plat_dat);
+
        gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
        if (!gmac)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
        gmac->pdev = pdev;
 
        err = ipq806x_gmac_of_parse(gmac);
-       if (err) {
+       if (IS_ERR(err)) {
                dev_err(dev, "device tree parsing error\n");
-               return err;
+               return PTR_ERR(err);
        }
 
        regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL,
@@ -285,7 +302,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
        default:
                dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
                        phy_modes(gmac->phy_mode));
-               return NULL;
+               return -EINVAL;
        }
        regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
 
@@ -304,7 +321,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
        default:
                dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
                        phy_modes(gmac->phy_mode));
-               return NULL;
+               return -EINVAL;
        }
        regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
 
@@ -327,30 +344,21 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
                             0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET);
        }
 
-       return gmac;
-}
+       plat_dat->has_gmac = true;
+       plat_dat->bsp_priv = gmac;
+       plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
 
-static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
-{
-       struct ipq806x_gmac *gmac = priv;
-
-       ipq806x_gmac_set_speed(gmac, speed);
+       return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 }
 
-static const struct stmmac_of_data ipq806x_gmac_data = {
-       .has_gmac       = 1,
-       .setup          = ipq806x_gmac_setup,
-       .fix_mac_speed  = ipq806x_gmac_fix_mac_speed,
-};
-
 static const struct of_device_id ipq806x_gmac_dwmac_match[] = {
-       { .compatible = "qcom,ipq806x-gmac", .data = &ipq806x_gmac_data },
+       { .compatible = "qcom,ipq806x-gmac" },
        { }
 };
 MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match);
 
 static struct platform_driver ipq806x_gmac_dwmac_driver = {
-       .probe = stmmac_pltfr_probe,
+       .probe = ipq806x_gmac_probe,
        .remove = stmmac_pltfr_remove,
        .driver = {
                .name           = "ipq806x-gmac-dwmac",
index cb888d3ebbdc3054732bdcfc8b17f8179e8f78fa..78e9d1861896335d86ac31e5701856d9bfe8779b 100644
 # define LPC18XX_CREG_CREG6_ETHMODE_MII                0x0
 # define LPC18XX_CREG_CREG6_ETHMODE_RMII       0x4
 
-struct lpc18xx_dwmac_priv_data {
+static int lpc18xx_dwmac_probe(struct platform_device *pdev)
+{
+       struct plat_stmmacenet_data *plat_dat;
+       struct stmmac_resources stmmac_res;
        struct regmap *reg;
-       int interface;
-};
+       u8 ethmode;
+       int ret;
 
-static void *lpc18xx_dwmac_setup(struct platform_device *pdev)
-{
-       struct lpc18xx_dwmac_priv_data *dwmac;
+       ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+       if (ret)
+               return ret;
 
-       dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
-       if (!dwmac)
-               return ERR_PTR(-ENOMEM);
+       plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+       if (IS_ERR(plat_dat))
+               return PTR_ERR(plat_dat);
 
-       dwmac->interface = of_get_phy_mode(pdev->dev.of_node);
-       if (dwmac->interface < 0)
-               return ERR_PTR(dwmac->interface);
+       plat_dat->has_gmac = true;
 
-       dwmac->reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
-       if (IS_ERR(dwmac->reg)) {
-               dev_err(&pdev->dev, "Syscon lookup failed\n");
-               return dwmac->reg;
+       reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
+       if (IS_ERR(reg)) {
+               dev_err(&pdev->dev, "syscon lookup failed\n");
+               return PTR_ERR(reg);
        }
 
-       return dwmac;
-}
-
-static int lpc18xx_dwmac_init(struct platform_device *pdev, void *priv)
-{
-       struct lpc18xx_dwmac_priv_data *dwmac = priv;
-       u8 ethmode;
-
-       if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
+       if (plat_dat->interface == PHY_INTERFACE_MODE_MII) {
                ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII;
-       } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
+       } else if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) {
                ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
        } else {
                dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
                return -EINVAL;
        }
 
-       regmap_update_bits(dwmac->reg, LPC18XX_CREG_CREG6,
+       regmap_update_bits(reg, LPC18XX_CREG_CREG6,
                           LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode);
 
-       return 0;
+       return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 }
 
-static const struct stmmac_of_data lpc18xx_dwmac_data = {
-       .has_gmac = 1,
-       .setup = lpc18xx_dwmac_setup,
-       .init = lpc18xx_dwmac_init,
-};
-
 static const struct of_device_id lpc18xx_dwmac_match[] = {
-       { .compatible = "nxp,lpc1850-dwmac", .data = &lpc18xx_dwmac_data },
+       { .compatible = "nxp,lpc1850-dwmac" },
        { }
 };
 MODULE_DEVICE_TABLE(of, lpc18xx_dwmac_match);
 
 static struct platform_driver lpc18xx_dwmac_driver = {
-       .probe  = stmmac_pltfr_probe,
+       .probe  = lpc18xx_dwmac_probe,
        .remove = stmmac_pltfr_remove,
        .driver = {
                .name           = "lpc18xx-dwmac",
index 61a324a87d09e0b6f06873ce35131adbfbf9826d..c1bac1912b37189d85510fa6648e6e6271c8b0d1 100644
@@ -47,36 +47,45 @@ static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed)
        writel(val, dwmac->reg);
 }
 
-static void *meson6_dwmac_setup(struct platform_device *pdev)
+static int meson6_dwmac_probe(struct platform_device *pdev)
 {
+       struct plat_stmmacenet_data *plat_dat;
+       struct stmmac_resources stmmac_res;
        struct meson_dwmac *dwmac;
        struct resource *res;
+       int ret;
+
+       ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+       if (ret)
+               return ret;
+
+       plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+       if (IS_ERR(plat_dat))
+               return PTR_ERR(plat_dat);
 
        dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
        if (!dwmac)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        dwmac->reg = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(dwmac->reg))
-               return ERR_CAST(dwmac->reg);
+               return PTR_ERR(dwmac->reg);
 
-       return dwmac;
-}
+       plat_dat->bsp_priv = dwmac;
+       plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed;
 
-static const struct stmmac_of_data meson6_dwmac_data = {
-       .setup          = meson6_dwmac_setup,
-       .fix_mac_speed  = meson6_dwmac_fix_mac_speed,
-};
+       return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
 
 static const struct of_device_id meson6_dwmac_match[] = {
-       { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
+       { .compatible = "amlogic,meson6-dwmac" },
        { }
 };
 MODULE_DEVICE_TABLE(of, meson6_dwmac_match);
 
 static struct platform_driver meson6_dwmac_driver = {
-       .probe  = stmmac_pltfr_probe,
+       .probe  = meson6_dwmac_probe,
        .remove = stmmac_pltfr_remove,
        .driver = {
                .name           = "meson6-dwmac",
index 00a1e1e09d4f33bb1cd9dec333691e746dadb1ff..11baa4b197793f583eba9a5dc5c53aefb145ff9b 100644
@@ -46,7 +46,7 @@ struct rk_priv_data {
        struct platform_device *pdev;
        int phy_iface;
        struct regulator *regulator;
-       struct rk_gmac_ops *ops;
+       const struct rk_gmac_ops *ops;
 
        bool clk_enabled;
        bool clock_input;
@@ -177,7 +177,7 @@ static void rk3288_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
        }
 }
 
-struct rk_gmac_ops rk3288_ops = {
+static const struct rk_gmac_ops rk3288_ops = {
        .set_to_rgmii = rk3288_set_to_rgmii,
        .set_to_rmii = rk3288_set_to_rmii,
        .set_rgmii_speed = rk3288_set_rgmii_speed,
@@ -289,7 +289,7 @@ static void rk3368_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
        }
 }
 
-struct rk_gmac_ops rk3368_ops = {
+static const struct rk_gmac_ops rk3368_ops = {
        .set_to_rgmii = rk3368_set_to_rgmii,
        .set_to_rmii = rk3368_set_to_rmii,
        .set_rgmii_speed = rk3368_set_rgmii_speed,
@@ -448,7 +448,7 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
 }
 
 static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
-                                         struct rk_gmac_ops *ops)
+                                         const struct rk_gmac_ops *ops)
 {
        struct rk_priv_data *bsp_priv;
        struct device *dev = &pdev->dev;
@@ -529,16 +529,6 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
        return bsp_priv;
 }
 
-static void *rk3288_gmac_setup(struct platform_device *pdev)
-{
-       return rk_gmac_setup(pdev, &rk3288_ops);
-}
-
-static void *rk3368_gmac_setup(struct platform_device *pdev)
-{
-       return rk_gmac_setup(pdev, &rk3368_ops);
-}
-
 static int rk_gmac_init(struct platform_device *pdev, void *priv)
 {
        struct rk_priv_data *bsp_priv = priv;
@@ -576,31 +566,52 @@ static void rk_fix_speed(void *priv, unsigned int speed)
                dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
 }
 
-static const struct stmmac_of_data rk3288_gmac_data = {
-       .has_gmac = 1,
-       .fix_mac_speed = rk_fix_speed,
-       .setup = rk3288_gmac_setup,
-       .init = rk_gmac_init,
-       .exit = rk_gmac_exit,
-};
+static int rk_gmac_probe(struct platform_device *pdev)
+{
+       struct plat_stmmacenet_data *plat_dat;
+       struct stmmac_resources stmmac_res;
+       const struct rk_gmac_ops *data;
+       int ret;
 
-static const struct stmmac_of_data rk3368_gmac_data = {
-       .has_gmac = 1,
-       .fix_mac_speed = rk_fix_speed,
-       .setup = rk3368_gmac_setup,
-       .init = rk_gmac_init,
-       .exit = rk_gmac_exit,
-};
+       data = of_device_get_match_data(&pdev->dev);
+       if (!data) {
+               dev_err(&pdev->dev, "no of match data provided\n");
+               return -EINVAL;
+       }
+
+       ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+       if (ret)
+               return ret;
+
+       plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+       if (IS_ERR(plat_dat))
+               return PTR_ERR(plat_dat);
+
+       plat_dat->has_gmac = true;
+       plat_dat->init = rk_gmac_init;
+       plat_dat->exit = rk_gmac_exit;
+       plat_dat->fix_mac_speed = rk_fix_speed;
+
+       plat_dat->bsp_priv = rk_gmac_setup(pdev, data);
+       if (IS_ERR(plat_dat->bsp_priv))
+               return PTR_ERR(plat_dat->bsp_priv);
+
+       ret = rk_gmac_init(pdev, plat_dat->bsp_priv);
+       if (ret)
+               return ret;
+
+       return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
 
 static const struct of_device_id rk_gmac_dwmac_match[] = {
-       { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
-       { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_gmac_data},
+       { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
+       { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
        { }
 };
 MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
 
 static struct platform_driver rk_gmac_dwmac_driver = {
-       .probe  = stmmac_pltfr_probe,
+       .probe  = rk_gmac_probe,
        .remove = stmmac_pltfr_remove,
        .driver = {
                .name           = "rk_gmac-dwmac",
index 8141c5b844ae681160fbf44b69e37f359492db85..401383b252a8f079aba4688afd6af1a53726b962 100644
@@ -175,31 +175,6 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
        return 0;
 }
 
-static void *socfpga_dwmac_probe(struct platform_device *pdev)
-{
-       struct device           *dev = &pdev->dev;
-       int                     ret;
-       struct socfpga_dwmac    *dwmac;
-
-       dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
-       if (!dwmac)
-               return ERR_PTR(-ENOMEM);
-
-       ret = socfpga_dwmac_parse_data(dwmac, dev);
-       if (ret) {
-               dev_err(dev, "Unable to parse OF data\n");
-               return ERR_PTR(ret);
-       }
-
-       ret = socfpga_dwmac_setup(dwmac);
-       if (ret) {
-               dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
-               return ERR_PTR(ret);
-       }
-
-       return dwmac;
-}
-
 static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv)
 {
        struct socfpga_dwmac    *dwmac = priv;
@@ -257,21 +232,58 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
        return ret;
 }
 
-static const struct stmmac_of_data socfpga_gmac_data = {
-       .setup = socfpga_dwmac_probe,
-       .init = socfpga_dwmac_init,
-       .exit = socfpga_dwmac_exit,
-       .fix_mac_speed = socfpga_dwmac_fix_mac_speed,
-};
+static int socfpga_dwmac_probe(struct platform_device *pdev)
+{
+       struct plat_stmmacenet_data *plat_dat;
+       struct stmmac_resources stmmac_res;
+       struct device           *dev = &pdev->dev;
+       int                     ret;
+       struct socfpga_dwmac    *dwmac;
+
+       ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+       if (ret)
+               return ret;
+
+       plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+       if (IS_ERR(plat_dat))
+               return PTR_ERR(plat_dat);
+
+       dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
+       if (!dwmac)
+               return -ENOMEM;
+
+       ret = socfpga_dwmac_parse_data(dwmac, dev);
+       if (ret) {
+               dev_err(dev, "Unable to parse OF data\n");
+               return ret;
+       }
+
+       ret = socfpga_dwmac_setup(dwmac);
+       if (ret) {
+               dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
+               return ret;
+       }
+
+       plat_dat->bsp_priv = dwmac;
+       plat_dat->init = socfpga_dwmac_init;
+       plat_dat->exit = socfpga_dwmac_exit;
+       plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
+
+       ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv);
+       if (ret)
+               return ret;
+
+       return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
 
 static const struct of_device_id socfpga_dwmac_match[] = {
-       { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
+       { .compatible = "altr,socfpga-stmmac" },
        { }
 };
 MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
 
 static struct platform_driver socfpga_dwmac_driver = {
-       .probe  = stmmac_pltfr_probe,
+       .probe  = socfpga_dwmac_probe,
        .remove = stmmac_pltfr_remove,
        .driver = {
                .name           = "socfpga-dwmac",
index a2e8111c5d14302ffafb6f7fcd9db9a3db7e00e3..7f6f4a4fcc708973af0aa48418bedaad984ef5fd 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/regmap.h>
 #include <linux/clk.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/of_net.h>
 
 #include "stmmac_platform.h"
@@ -128,6 +129,11 @@ struct sti_dwmac {
        struct device *dev;
        struct regmap *regmap;
        u32 speed;
+       void (*fix_retime_src)(void *priv, unsigned int speed);
+};
+
+struct sti_dwmac_of_data {
+       void (*fix_retime_src)(void *priv, unsigned int speed);
 };
 
 static u32 phy_intf_sels[] = {
@@ -222,8 +228,9 @@ static void stid127_fix_retime_src(void *priv, u32 spd)
        regmap_update_bits(dwmac->regmap, reg, STID127_RETIME_SRC_MASK, val);
 }
 
-static void sti_dwmac_ctrl_init(struct sti_dwmac *dwmac)
+static int sti_dwmac_init(struct platform_device *pdev, void *priv)
 {
+       struct sti_dwmac *dwmac = priv;
        struct regmap *regmap = dwmac->regmap;
        int iface = dwmac->interface;
        struct device *dev = dwmac->dev;
@@ -241,28 +248,8 @@ static void sti_dwmac_ctrl_init(struct sti_dwmac *dwmac)
 
        val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
        regmap_update_bits(regmap, reg, ENMII_MASK, val);
-}
-
-static int stix4xx_init(struct platform_device *pdev, void *priv)
-{
-       struct sti_dwmac *dwmac = priv;
-       u32 spd = dwmac->speed;
-
-       sti_dwmac_ctrl_init(dwmac);
-
-       stih4xx_fix_retime_src(priv, spd);
-
-       return 0;
-}
 
-static int stid127_init(struct platform_device *pdev, void *priv)
-{
-       struct sti_dwmac *dwmac = priv;
-       u32 spd = dwmac->speed;
-
-       sti_dwmac_ctrl_init(dwmac);
-
-       stid127_fix_retime_src(priv, spd);
+       dwmac->fix_retime_src(priv, dwmac->speed);
 
        return 0;
 }
@@ -334,36 +321,58 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
        return 0;
 }
 
-static void *sti_dwmac_setup(struct platform_device *pdev)
+static int sti_dwmac_probe(struct platform_device *pdev)
 {
+       struct plat_stmmacenet_data *plat_dat;
+       const struct sti_dwmac_of_data *data;
+       struct stmmac_resources stmmac_res;
        struct sti_dwmac *dwmac;
        int ret;
 
+       data = of_device_get_match_data(&pdev->dev);
+       if (!data) {
+               dev_err(&pdev->dev, "No OF match data provided\n");
+               return -EINVAL;
+       }
+
+       ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+       if (ret)
+               return ret;
+
+       plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+       if (IS_ERR(plat_dat))
+               return PTR_ERR(plat_dat);
+
        dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
        if (!dwmac)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
        ret = sti_dwmac_parse_data(dwmac, pdev);
        if (ret) {
                dev_err(&pdev->dev, "Unable to parse OF data\n");
-               return ERR_PTR(ret);
+               return ret;
        }
 
-       return dwmac;
+       dwmac->fix_retime_src = data->fix_retime_src;
+
+       plat_dat->bsp_priv = dwmac;
+       plat_dat->init = sti_dwmac_init;
+       plat_dat->exit = sti_dwmac_exit;
+       plat_dat->fix_mac_speed = data->fix_retime_src;
+
+       ret = sti_dwmac_init(pdev, plat_dat->bsp_priv);
+       if (ret)
+               return ret;
+
+       return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 }
 
-static const struct stmmac_of_data stih4xx_dwmac_data = {
-       .fix_mac_speed = stih4xx_fix_retime_src,
-       .setup = sti_dwmac_setup,
-       .init = stix4xx_init,
-       .exit = sti_dwmac_exit,
+static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
+       .fix_retime_src = stih4xx_fix_retime_src,
 };
 
-static const struct stmmac_of_data stid127_dwmac_data = {
-       .fix_mac_speed = stid127_fix_retime_src,
-       .setup = sti_dwmac_setup,
-       .init = stid127_init,
-       .exit = sti_dwmac_exit,
+static const struct sti_dwmac_of_data stid127_dwmac_data = {
+       .fix_retime_src = stid127_fix_retime_src,
 };
 
 static const struct of_device_id sti_dwmac_match[] = {
@@ -376,7 +385,7 @@ static const struct of_device_id sti_dwmac_match[] = {
 MODULE_DEVICE_TABLE(of, sti_dwmac_match);
 
 static struct platform_driver sti_dwmac_driver = {
-       .probe  = stmmac_pltfr_probe,
+       .probe  = sti_dwmac_probe,
        .remove = stmmac_pltfr_remove,
        .driver = {
                .name           = "sti-dwmac",
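
The sti conversion additionally keeps its per-SoC variation in a driver-local
sti_dwmac_of_data looked up via of_device_get_match_data(), replacing the
shared stmmac_of_data that used to travel through the OF match table. The
idiom, sketched with hypothetical names:

    static const struct foo_of_data foo_v1_data = {
            .fix_retime_src = foo_v1_fix_retime_src,
    };

    static const struct of_device_id foo_match[] = {
            { .compatible = "vendor,foo-v1", .data = &foo_v1_data },
            { }
    };

    /* in probe: */
    const struct foo_of_data *data = of_device_get_match_data(&pdev->dev);

    if (!data)
            return -EINVAL;
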
index 15048ca397591acb90f746a0e3b7bfbf5b9c9e86..52b8ed9bd87c2e20707c41ab0eaac247de26afce 100644 (file)
@@ -33,35 +33,6 @@ struct sunxi_priv_data {
        struct regulator *regulator;
 };
 
-static void *sun7i_gmac_setup(struct platform_device *pdev)
-{
-       struct sunxi_priv_data *gmac;
-       struct device *dev = &pdev->dev;
-
-       gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
-       if (!gmac)
-               return ERR_PTR(-ENOMEM);
-
-       gmac->interface = of_get_phy_mode(dev->of_node);
-
-       gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
-       if (IS_ERR(gmac->tx_clk)) {
-               dev_err(dev, "could not get tx clock\n");
-               return gmac->tx_clk;
-       }
-
-       /* Optional regulator for PHY */
-       gmac->regulator = devm_regulator_get_optional(dev, "phy");
-       if (IS_ERR(gmac->regulator)) {
-               if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
-                       return ERR_PTR(-EPROBE_DEFER);
-               dev_info(dev, "no regulator found\n");
-               gmac->regulator = NULL;
-       }
-
-       return gmac;
-}
-
 #define SUN7I_GMAC_GMII_RGMII_RATE     125000000
 #define SUN7I_GMAC_MII_RATE            25000000
 
@@ -132,25 +103,67 @@ static void sun7i_fix_speed(void *priv, unsigned int speed)
        }
 }
 
-/* of_data specifying hardware features and callbacks.
- * hardware features were copied from Allwinner drivers. */
-static const struct stmmac_of_data sun7i_gmac_data = {
-       .has_gmac = 1,
-       .tx_coe = 1,
-       .fix_mac_speed = sun7i_fix_speed,
-       .setup = sun7i_gmac_setup,
-       .init = sun7i_gmac_init,
-       .exit = sun7i_gmac_exit,
-};
+static int sun7i_gmac_probe(struct platform_device *pdev)
+{
+       struct plat_stmmacenet_data *plat_dat;
+       struct stmmac_resources stmmac_res;
+       struct sunxi_priv_data *gmac;
+       struct device *dev = &pdev->dev;
+       int ret;
+
+       ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+       if (ret)
+               return ret;
+
+       plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+       if (IS_ERR(plat_dat))
+               return PTR_ERR(plat_dat);
+
+       gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+       if (!gmac)
+               return -ENOMEM;
+
+       gmac->interface = of_get_phy_mode(dev->of_node);
+
+       gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
+       if (IS_ERR(gmac->tx_clk)) {
+               dev_err(dev, "could not get tx clock\n");
+               return PTR_ERR(gmac->tx_clk);
+       }
+
+       /* Optional regulator for PHY */
+       gmac->regulator = devm_regulator_get_optional(dev, "phy");
+       if (IS_ERR(gmac->regulator)) {
+               if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+               dev_info(dev, "no regulator found\n");
+               gmac->regulator = NULL;
+       }
+
+       /* Platform data specifying hardware features and callbacks.
+        * Hardware features were copied from Allwinner drivers. */
+       plat_dat->tx_coe = 1;
+       plat_dat->has_gmac = true;
+       plat_dat->bsp_priv = gmac;
+       plat_dat->init = sun7i_gmac_init;
+       plat_dat->exit = sun7i_gmac_exit;
+       plat_dat->fix_mac_speed = sun7i_fix_speed;
+
+       ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
+       if (ret)
+               return ret;
+
+       return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
 
 static const struct of_device_id sun7i_dwmac_match[] = {
-       { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
+       { .compatible = "allwinner,sun7i-a20-gmac" },
        { }
 };
 MODULE_DEVICE_TABLE(of, sun7i_dwmac_match);
 
 static struct platform_driver sun7i_dwmac_driver = {
-       .probe  = stmmac_pltfr_probe,
+       .probe  = sun7i_gmac_probe,
        .remove = stmmac_pltfr_remove,
        .driver = {
                .name           = "sun7i-dwmac",
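
The regulator block carried into the new probe above is the usual
optional-resource idiom: -EPROBE_DEFER must be propagated so the probe is
retried once the regulator provider appears, while any other failure simply
means the PHY has no software-controlled supply. Condensed:

    gmac->regulator = devm_regulator_get_optional(dev, "phy");
    if (IS_ERR(gmac->regulator)) {
            if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
                    return -EPROBE_DEFER;   /* retry probe later */
            gmac->regulator = NULL;         /* no PHY supply to manage */
    }
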
index 50f7a7a26821c7a40cb73294a4844406ea9caee5..864b476f7fd5a33b81ac2f6ea9b08e0cf99d299d 100644 (file)
@@ -2843,7 +2843,7 @@ int stmmac_dvr_probe(struct device *device,
        if (res->mac)
                memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
 
-       dev_set_drvdata(device, priv);
+       dev_set_drvdata(device, priv->dev);
 
        /* Verify driver arguments */
        stmmac_verify_args();
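
The one-line change above makes dev_get_drvdata() on the MAC device yield the
net_device rather than the stmmac private struct. The platform helpers
consume it that way; a sketch of the expected retrieval on the remove/PM side
(assuming the usual pattern):

    struct net_device *ndev = platform_get_drvdata(pdev);
    struct stmmac_priv *priv = netdev_priv(ndev);
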
index f3918c7e7eeb373a6736bb5145c33320acbabc53..d02691ba3d7feb15ec7e783db7d9086924a6f7fc 100644 (file)
@@ -104,32 +104,16 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
  * this function is to read the driver parameters from the device tree and
  * set some private fields that will be used by the main driver at runtime.
  */
-static int stmmac_probe_config_dt(struct platform_device *pdev,
-                                 struct plat_stmmacenet_data *plat,
-                                 const char **mac)
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
 {
        struct device_node *np = pdev->dev.of_node;
+       struct plat_stmmacenet_data *plat;
        struct stmmac_dma_cfg *dma_cfg;
-       const struct of_device_id *device;
-       struct device *dev = &pdev->dev;
-
-       device = of_match_device(dev->driver->of_match_table, dev);
-       if (device->data) {
-               const struct stmmac_of_data *data = device->data;
-               plat->has_gmac = data->has_gmac;
-               plat->enh_desc = data->enh_desc;
-               plat->tx_coe = data->tx_coe;
-               plat->rx_coe = data->rx_coe;
-               plat->bugged_jumbo = data->bugged_jumbo;
-               plat->pmt = data->pmt;
-               plat->riwt_off = data->riwt_off;
-               plat->fix_mac_speed = data->fix_mac_speed;
-               plat->bus_setup = data->bus_setup;
-               plat->setup = data->setup;
-               plat->free = data->free;
-               plat->init = data->init;
-               plat->exit = data->exit;
-       }
+
+       plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+       if (!plat)
+               return ERR_PTR(-ENOMEM);
 
        *mac = of_get_mac_address(np);
        plat->interface = of_get_phy_mode(np);
@@ -151,7 +135,7 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
        /* If phy-handle is not specified, check if we have a fixed-phy */
        if (!plat->phy_node && of_phy_is_fixed_link(np)) {
                if ((of_phy_register_fixed_link(np) < 0))
-                       return -ENODEV;
+                       return ERR_PTR(-ENODEV);
 
                plat->phy_node = of_node_get(np);
        }
@@ -182,6 +166,12 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
         */
        plat->maxmtu = JUMBO_LEN;
 
+       /* Set default value for multicast hash bins */
+       plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+       /* Set default value for unicast filter entries */
+       plat->unicast_filter_entries = 1;
+
        /*
         * Currently only the properties needed on SPEAr600
         * are provided. All other properties should be added
@@ -222,7 +212,7 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
                                       GFP_KERNEL);
                if (!dma_cfg) {
                        of_node_put(np);
-                       return -ENOMEM;
+                       return ERR_PTR(-ENOMEM);
                }
                plat->dma_cfg = dma_cfg;
                of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
@@ -240,44 +230,34 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
                pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
        }
 
-       return 0;
+       return plat;
 }
 #else
-static int stmmac_probe_config_dt(struct platform_device *pdev,
-                                 struct plat_stmmacenet_data *plat,
-                                 const char **mac)
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
 {
-       return -ENOSYS;
+       return ERR_PTR(-ENOSYS);
 }
 #endif /* CONFIG_OF */
+EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
 
-/**
- * stmmac_pltfr_probe - platform driver probe.
- * @pdev: platform device pointer
- * Description: platform_device probe function. It is to allocate
- * the necessary platform resources, invoke custom helper (if required) and
- * invoke the main probe function.
- */
-int stmmac_pltfr_probe(struct platform_device *pdev)
+int stmmac_get_platform_resources(struct platform_device *pdev,
+                                 struct stmmac_resources *stmmac_res)
 {
-       struct stmmac_resources stmmac_res;
-       int ret = 0;
        struct resource *res;
-       struct device *dev = &pdev->dev;
-       struct plat_stmmacenet_data *plat_dat = NULL;
 
-       memset(&stmmac_res, 0, sizeof(stmmac_res));
+       memset(stmmac_res, 0, sizeof(*stmmac_res));
 
        /* Get IRQ information early to have an ability to ask for deferred
         * probe if needed before we went too far with resource allocation.
         */
-       stmmac_res.irq = platform_get_irq_byname(pdev, "macirq");
-       if (stmmac_res.irq < 0) {
-               if (stmmac_res.irq != -EPROBE_DEFER) {
-                       dev_err(dev,
+       stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
+       if (stmmac_res->irq < 0) {
+               if (stmmac_res->irq != -EPROBE_DEFER) {
+                       dev_err(&pdev->dev,
                                "MAC IRQ configuration information not found\n");
                }
-               return stmmac_res.irq;
+               return stmmac_res->irq;
        }
 
        /* On some platforms, e.g. SPEAr, the wake-up irq differs from the mac irq
@@ -287,64 +267,23 @@ int stmmac_pltfr_probe(struct platform_device *pdev)
         * If the wake-up interrupt is not passed from the platform,
         * the driver will continue to use the mac irq (ndev->irq).
         */
-       stmmac_res.wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
-       if (stmmac_res.wol_irq < 0) {
-               if (stmmac_res.wol_irq == -EPROBE_DEFER)
+       stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+       if (stmmac_res->wol_irq < 0) {
+               if (stmmac_res->wol_irq == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
-               stmmac_res.wol_irq = stmmac_res.irq;
+               stmmac_res->wol_irq = stmmac_res->irq;
        }
 
-       stmmac_res.lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
-       if (stmmac_res.lpi_irq == -EPROBE_DEFER)
+       stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+       if (stmmac_res->lpi_irq == -EPROBE_DEFER)
                return -EPROBE_DEFER;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       stmmac_res.addr = devm_ioremap_resource(dev, res);
-       if (IS_ERR(stmmac_res.addr))
-               return PTR_ERR(stmmac_res.addr);
-
-       plat_dat = dev_get_platdata(&pdev->dev);
-
-       if (!plat_dat)
-               plat_dat = devm_kzalloc(&pdev->dev,
-                                       sizeof(struct plat_stmmacenet_data),
-                                       GFP_KERNEL);
-       if (!plat_dat) {
-               pr_err("%s: ERROR: no memory", __func__);
-               return  -ENOMEM;
-       }
-
-       /* Set default value for multicast hash bins */
-       plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
+       stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);
 
-       /* Set default value for unicast filter entries */
-       plat_dat->unicast_filter_entries = 1;
-
-       if (pdev->dev.of_node) {
-               ret = stmmac_probe_config_dt(pdev, plat_dat, &stmmac_res.mac);
-               if (ret) {
-                       pr_err("%s: main dt probe failed", __func__);
-                       return ret;
-               }
-       }
-
-       /* Custom setup (if needed) */
-       if (plat_dat->setup) {
-               plat_dat->bsp_priv = plat_dat->setup(pdev);
-               if (IS_ERR(plat_dat->bsp_priv))
-                       return PTR_ERR(plat_dat->bsp_priv);
-       }
-
-       /* Custom initialisation (if needed)*/
-       if (plat_dat->init) {
-               ret = plat_dat->init(pdev, plat_dat->bsp_priv);
-               if (unlikely(ret))
-                       return ret;
-       }
-
-       return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+       return PTR_ERR_OR_ZERO(stmmac_res->addr);
 }
-EXPORT_SYMBOL_GPL(stmmac_pltfr_probe);
+EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);
 
 /**
  * stmmac_pltfr_remove
@@ -361,9 +300,6 @@ int stmmac_pltfr_remove(struct platform_device *pdev)
        if (priv->plat->exit)
                priv->plat->exit(pdev, priv->plat->bsp_priv);
 
-       if (priv->plat->free)
-               priv->plat->free(pdev, priv->plat->bsp_priv);
-
        return ret;
 }
 EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
@@ -413,3 +349,7 @@ static int stmmac_pltfr_resume(struct device *dev)
 SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
                                       stmmac_pltfr_resume);
 EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
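
Note that stmmac_pltfr_remove() no longer invokes a plat->free hook (removed
in the hunk above); glue state is expected to come from device-managed
allocations, as in every converted probe in this series:

    dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL); /* freed on detach */
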
index 71da86d7bd00d9f7feb0a501cce0e6bbb4cb7298..ffeb8d9e2b2ef9f55f5b788d1853c208cf38fde4 100644 (file)
 #ifndef __STMMAC_PLATFORM_H__
 #define __STMMAC_PLATFORM_H__
 
-int stmmac_pltfr_probe(struct platform_device *pdev);
+#include "stmmac.h"
+
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, const char **mac);
+
+int stmmac_get_platform_resources(struct platform_device *pdev,
+                                 struct stmmac_resources *stmmac_res);
+
 int stmmac_pltfr_remove(struct platform_device *pdev);
 extern const struct dev_pm_ops stmmac_pltfr_pm_ops;
 
index 0c5842aeb807014c632a2d713b366133d7021f56..ab6051a43134f4dd7296679ffe089203162e3aa7 100644 (file)
@@ -6658,10 +6658,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
                struct sk_buff *skb_new;
 
                skb_new = skb_realloc_headroom(skb, len);
-               if (!skb_new) {
-                       rp->tx_errors++;
+               if (!skb_new)
                        goto out_drop;
-               }
                kfree_skb(skb);
                skb = skb_new;
        } else
diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig
new file mode 100644 (file)
index 0000000..a8f3151
--- /dev/null
@@ -0,0 +1,27 @@
+#
+# Synopsys network device configuration
+#
+
+config NET_VENDOR_SYNOPSYS
+       bool "Synopsys devices"
+       default y
+       ---help---
+         If you have a network (Ethernet) device belonging to this class, say Y.
+
+         Note that the answer to this question doesn't directly affect the
+         kernel: saying N will just cause the configurator to skip all
+         the questions about Synopsys devices. If you say Y, you will be asked
+         for your specific device in the following questions.
+
+if NET_VENDOR_SYNOPSYS
+
+config SYNOPSYS_DWC_ETH_QOS
+       tristate "Sypnopsys DWC Ethernet QOS v4.10a support"
+       select PHYLIB
+       select CRC32
+       select MII
+       depends on OF
+       ---help---
+         This driver supports the DWC Ethernet QoS from Synopsys.
+
+endif # NET_VENDOR_SYNOPSYS
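
Enabling the new driver needs both symbols; a .config fragment (a module
build is chosen here as an example, and CONFIG_OF must already be set):

    CONFIG_NET_VENDOR_SYNOPSYS=y
    CONFIG_SYNOPSYS_DWC_ETH_QOS=m
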
diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile
new file mode 100644 (file)
index 0000000..7a37572
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Makefile for the Synopsys network device drivers.
+#
+
+obj-$(CONFIG_SYNOPSYS_DWC_ETH_QOS) += dwc_eth_qos.o
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
new file mode 100644 (file)
index 0000000..85b3326
--- /dev/null
@@ -0,0 +1,3019 @@
+/*  Synopsys DWC Ethernet Quality-of-Service v4.10a Linux driver
+ *
+ *  This is a driver for the Synopsys DWC Ethernet QoS IP version 4.10a (GMAC).
+ *  This version introduced a lot of changes which break backwards
+ *  compatibility with the non-QoS IP from Synopsys (used in the ST Micro
+ *  drivers). Some fields differ between versions 4.00a and 4.10a, mainly
+ *  the interrupt bit fields. The driver could be made compatible with
+ *  4.00a if all relevant HW errata are handled.
+ *
+ *  The GMAC is highly configurable at synthesis time. This driver has been
+ *  developed for a subset of the total available feature set. Currently
+ *  it supports:
+ *  - TSO
+ *  - Checksum offload for RX and TX.
+ *  - Energy Efficient Ethernet.
+ *  - GMII phy interface.
+ *  - The statistics module.
+ *  - Single RX and TX queue.
+ *
+ *  Copyright (C) 2015 Axis Communications AB.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms and conditions of the GNU General Public License,
+ *  version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ethtool.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+
+#include <linux/phy.h>
+#include <linux/mii.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+
+#include <linux/device.h>
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
+
+#include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/timer.h>
+#include <linux/tcp.h>
+
+#define DRIVER_NAME                    "dwceqos"
+#define DRIVER_DESCRIPTION             "Synopsys DWC Ethernet QoS driver"
+#define DRIVER_VERSION                 "0.9"
+
+#define DWCEQOS_MSG_DEFAULT    (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+       NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+
+#define DWCEQOS_TX_TIMEOUT 5 /* Seconds */
+
+#define DWCEQOS_LPI_TIMER_MIN      8
+#define DWCEQOS_LPI_TIMER_MAX      ((1 << 20) - 1)
+
+#define DWCEQOS_RX_BUF_SIZE 2048
+
+#define DWCEQOS_RX_DCNT 256
+#define DWCEQOS_TX_DCNT 256
+
+#define DWCEQOS_HASH_TABLE_SIZE 64
+
+/* The size field in the DMA descriptor is 14 bits */
+#define BYTES_PER_DMA_DESC 16376
+
+/* Hardware registers */
+#define START_MAC_REG_OFFSET    0x0000
+#define MAX_MAC_REG_OFFSET      0x0bd0
+#define START_MTL_REG_OFFSET    0x0c00
+#define MAX_MTL_REG_OFFSET      0x0d7c
+#define START_DMA_REG_OFFSET    0x1000
+#define MAX_DMA_REG_OFFSET      0x117C
+
+#define REG_SPACE_SIZE          0x1800
+
+/* DMA */
+#define REG_DWCEQOS_DMA_MODE             0x1000
+#define REG_DWCEQOS_DMA_SYSBUS_MODE      0x1004
+#define REG_DWCEQOS_DMA_IS               0x1008
+#define REG_DWCEQOS_DMA_DEBUG_ST0        0x100c
+
+/* DMA channel registers */
+#define REG_DWCEQOS_DMA_CH0_CTRL         0x1100
+#define REG_DWCEQOS_DMA_CH0_TX_CTRL      0x1104
+#define REG_DWCEQOS_DMA_CH0_RX_CTRL      0x1108
+#define REG_DWCEQOS_DMA_CH0_TXDESC_LIST  0x1114
+#define REG_DWCEQOS_DMA_CH0_RXDESC_LIST  0x111c
+#define REG_DWCEQOS_DMA_CH0_TXDESC_TAIL  0x1120
+#define REG_DWCEQOS_DMA_CH0_RXDESC_TAIL  0x1128
+#define REG_DWCEQOS_DMA_CH0_TXDESC_LEN   0x112c
+#define REG_DWCEQOS_DMA_CH0_RXDESC_LEN   0x1130
+#define REG_DWCEQOS_DMA_CH0_IE           0x1134
+#define REG_DWCEQOS_DMA_CH0_CUR_TXDESC   0x1144
+#define REG_DWCEQOS_DMA_CH0_CUR_RXDESC   0x114c
+#define REG_DWCEQOS_DMA_CH0_CUR_TXBUF    0x1154
+#define REG_DWCEQOS_DMA_CH0_CUR_RXBUF    0x115c
+#define REG_DWCEQOS_DMA_CH0_STA          0x1160
+
+#define DWCEQOS_DMA_MODE_TXPR            BIT(11)
+#define DWCEQOS_DMA_MODE_DA              BIT(1)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_EN_LPI   BIT(31)
+#define DWCEQOS_DMA_SYSBUS_MODE_FB       BIT(0)
+#define DWCEQOS_DMA_SYSBUS_MODE_AAL      BIT(12)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(x) \
+       (((x) << 16) & 0x000F0000)
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT    3
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_MASK       GENMASK(19, 16)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(x) \
+       (((x) << 24) & 0x0F000000)
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT    3
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_MASK       GENMASK(27, 24)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK GENMASK(7, 1)
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST(x) \
+       (((x) << 1) & DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK)
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT   GENMASK(3, 1)
+
+#define DWCEQOS_DMA_CH_CTRL_PBLX8       BIT(16)
+#define DWCEQOS_DMA_CH_CTRL_DSL(x)      ((x) << 18)
+
+#define DWCEQOS_DMA_CH_CTRL_PBL(x)       ((x) << 16)
+#define DWCEQOS_DMA_CH_CTRL_START         BIT(0)
+#define DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(x)   ((x) << 1)
+#define DWCEQOS_DMA_CH_TX_OSP            BIT(4)
+#define DWCEQOS_DMA_CH_TX_TSE            BIT(12)
+
+#define DWCEQOS_DMA_CH0_IE_NIE           BIT(15)
+#define DWCEQOS_DMA_CH0_IE_AIE           BIT(14)
+#define DWCEQOS_DMA_CH0_IE_RIE           BIT(6)
+#define DWCEQOS_DMA_CH0_IE_TIE           BIT(0)
+#define DWCEQOS_DMA_CH0_IE_FBEE          BIT(12)
+#define DWCEQOS_DMA_CH0_IE_RBUE          BIT(7)
+
+#define DWCEQOS_DMA_IS_DC0IS             BIT(0)
+#define DWCEQOS_DMA_IS_MTLIS             BIT(16)
+#define DWCEQOS_DMA_IS_MACIS             BIT(17)
+
+#define DWCEQOS_DMA_CH0_IS_TI            BIT(0)
+#define DWCEQOS_DMA_CH0_IS_RI            BIT(6)
+#define DWCEQOS_DMA_CH0_IS_RBU           BIT(7)
+#define DWCEQOS_DMA_CH0_IS_FBE           BIT(12)
+#define DWCEQOS_DMA_CH0_IS_CDE           BIT(13)
+#define DWCEQOS_DMA_CH0_IS_AIS           BIT(14)
+
+#define DWCEQOS_DMA_CH0_IS_TEB           GENMASK(18, 16)
+#define DWCEQOS_DMA_CH0_IS_TX_ERR_READ   BIT(16)
+#define DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR  BIT(17)
+
+#define DWCEQOS_DMA_CH0_IS_REB           GENMASK(21, 19)
+#define DWCEQOS_DMA_CH0_IS_RX_ERR_READ   BIT(19)
+#define DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR  BIT(20)
+
+/* DMA descriptor bits for RX normal descriptor (read format) */
+#define DWCEQOS_DMA_RDES3_OWN     BIT(31)
+#define DWCEQOS_DMA_RDES3_INTE    BIT(30)
+#define DWCEQOS_DMA_RDES3_BUF2V   BIT(25)
+#define DWCEQOS_DMA_RDES3_BUF1V   BIT(24)
+
+/* DMA descriptor bits for RX normal descriptor (write back format) */
+#define DWCEQOS_DMA_RDES1_IPCE    BIT(7)
+#define DWCEQOS_DMA_RDES3_ES      BIT(15)
+#define DWCEQOS_DMA_RDES3_E_JT    BIT(14)
+#define DWCEQOS_DMA_RDES3_PL(x)   ((x) & 0x7fff)
+#define DWCEQOS_DMA_RDES1_PT      0x00000007
+#define DWCEQOS_DMA_RDES1_PT_UDP  BIT(0)
+#define DWCEQOS_DMA_RDES1_PT_TCP  BIT(1)
+#define DWCEQOS_DMA_RDES1_PT_ICMP 0x00000003
+
+/* DMA descriptor bits for TX normal descriptor (read format) */
+#define DWCEQOS_DMA_TDES2_IOC     BIT(31)
+#define DWCEQOS_DMA_TDES3_OWN     BIT(31)
+#define DWCEQOS_DMA_TDES3_CTXT    BIT(30)
+#define DWCEQOS_DMA_TDES3_FD      BIT(29)
+#define DWCEQOS_DMA_TDES3_LD      BIT(28)
+#define DWCEQOS_DMA_TDES3_CIPH    BIT(16)
+#define DWCEQOS_DMA_TDES3_CIPP    BIT(17)
+#define DWCEQOS_DMA_TDES3_CA      0x00030000
+#define DWCEQOS_DMA_TDES3_TSE     BIT(18)
+#define DWCEQOS_DMA_DES3_THL(x)   ((x) << 19)
+#define DWCEQOS_DMA_DES2_B2L(x)   ((x) << 16)
+
+#define DWCEQOS_DMA_TDES3_TCMSSV    BIT(26)
+
+/* DMA channel states */
+#define DMA_TX_CH_STOPPED   0
+#define DMA_TX_CH_SUSPENDED 6
+
+#define DMA_GET_TX_STATE_CH0(status0) (((status0) & 0xF000) >> 12)
+
+/* MTL */
+#define REG_DWCEQOS_MTL_OPER             0x0c00
+#define REG_DWCEQOS_MTL_DEBUG_ST         0x0c0c
+#define REG_DWCEQOS_MTL_TXQ0_DEBUG_ST    0x0d08
+#define REG_DWCEQOS_MTL_RXQ0_DEBUG_ST    0x0d38
+
+#define REG_DWCEQOS_MTL_IS               0x0c20
+#define REG_DWCEQOS_MTL_TXQ0_OPER        0x0d00
+#define REG_DWCEQOS_MTL_RXQ0_OPER        0x0d30
+#define REG_DWCEQOS_MTL_RXQ0_MIS_CNT     0x0d34
+#define REG_DWCEQOS_MTL_RXQ0_CTRL         0x0d3c
+
+#define REG_DWCEQOS_MTL_Q0_ISCTRL         0x0d2c
+
+#define DWCEQOS_MTL_SCHALG_STRICT        0x00000060
+
+#define DWCEQOS_MTL_TXQ_TXQEN            BIT(3)
+#define DWCEQOS_MTL_TXQ_TSF              BIT(1)
+#define DWCEQOS_MTL_TXQ_FTQ              BIT(0)
+#define DWCEQOS_MTL_TXQ_TTC512           0x00000070
+
+#define DWCEQOS_MTL_TXQ_SIZE(x)          ((((x) - 256) & 0xff00) << 8)
+
+#define DWCEQOS_MTL_RXQ_SIZE(x)          ((((x) - 256) & 0xff00) << 12)
+#define DWCEQOS_MTL_RXQ_EHFC             BIT(7)
+#define DWCEQOS_MTL_RXQ_DIS_TCP_EF       BIT(6)
+#define DWCEQOS_MTL_RXQ_FEP              BIT(4)
+#define DWCEQOS_MTL_RXQ_FUP              BIT(3)
+#define DWCEQOS_MTL_RXQ_RSF              BIT(5)
+#define DWCEQOS_MTL_RXQ_RTC32            BIT(0)
+
+/* MAC */
+#define REG_DWCEQOS_MAC_CFG              0x0000
+#define REG_DWCEQOS_MAC_EXT_CFG          0x0004
+#define REG_DWCEQOS_MAC_PKT_FILT         0x0008
+#define REG_DWCEQOS_MAC_WD_TO            0x000c
+#define REG_DWCEQOS_HASTABLE_LO          0x0010
+#define REG_DWCEQOS_HASTABLE_HI          0x0014
+#define REG_DWCEQOS_MAC_IS               0x00b0
+#define REG_DWCEQOS_MAC_IE               0x00b4
+#define REG_DWCEQOS_MAC_STAT             0x00b8
+#define REG_DWCEQOS_MAC_MDIO_ADDR        0x0200
+#define REG_DWCEQOS_MAC_MDIO_DATA        0x0204
+#define REG_DWCEQOS_MAC_MAC_ADDR0_HI     0x0300
+#define REG_DWCEQOS_MAC_MAC_ADDR0_LO     0x0304
+#define REG_DWCEQOS_MAC_RXQ0_CTRL0       0x00a0
+#define REG_DWCEQOS_MAC_HW_FEATURE0      0x011c
+#define REG_DWCEQOS_MAC_HW_FEATURE1      0x0120
+#define REG_DWCEQOS_MAC_HW_FEATURE2      0x0124
+#define REG_DWCEQOS_MAC_HASHTABLE_LO     0x0010
+#define REG_DWCEQOS_MAC_HASHTABLE_HI     0x0014
+#define REG_DWCEQOS_MAC_LPI_CTRL_STATUS  0x00d0
+#define REG_DWCEQOS_MAC_LPI_TIMERS_CTRL  0x00d4
+#define REG_DWCEQOS_MAC_LPI_ENTRY_TIMER  0x00d8
+#define REG_DWCEQOS_MAC_1US_TIC_COUNTER  0x00dc
+#define REG_DWCEQOS_MAC_RX_FLOW_CTRL     0x0090
+#define REG_DWCEQOS_MAC_Q0_TX_FLOW      0x0070
+
+#define DWCEQOS_MAC_CFG_ACS              BIT(20)
+#define DWCEQOS_MAC_CFG_JD               BIT(17)
+#define DWCEQOS_MAC_CFG_JE               BIT(16)
+#define DWCEQOS_MAC_CFG_PS               BIT(15)
+#define DWCEQOS_MAC_CFG_FES              BIT(14)
+#define DWCEQOS_MAC_CFG_DM               BIT(13)
+#define DWCEQOS_MAC_CFG_DO               BIT(10)
+#define DWCEQOS_MAC_CFG_TE               BIT(1)
+#define DWCEQOS_MAC_CFG_IPC              BIT(27)
+#define DWCEQOS_MAC_CFG_RE               BIT(0)
+
+#define DWCEQOS_ADDR_HIGH(reg)           (0x00000300 + ((reg) * 8))
+#define DWCEQOS_ADDR_LOW(reg)            (0x00000304 + ((reg) * 8))
+
+#define DWCEQOS_MAC_IS_LPI_INT           BIT(5)
+#define DWCEQOS_MAC_IS_MMC_INT           BIT(8)
+
+#define DWCEQOS_MAC_RXQ_EN               BIT(1)
+#define DWCEQOS_MAC_MAC_ADDR_HI_EN       BIT(31)
+#define DWCEQOS_MAC_PKT_FILT_RA          BIT(31)
+#define DWCEQOS_MAC_PKT_FILT_HPF         BIT(10)
+#define DWCEQOS_MAC_PKT_FILT_SAF         BIT(9)
+#define DWCEQOS_MAC_PKT_FILT_SAIF        BIT(8)
+#define DWCEQOS_MAC_PKT_FILT_DBF         BIT(5)
+#define DWCEQOS_MAC_PKT_FILT_PM          BIT(4)
+#define DWCEQOS_MAC_PKT_FILT_DAIF        BIT(3)
+#define DWCEQOS_MAC_PKT_FILT_HMC         BIT(2)
+#define DWCEQOS_MAC_PKT_FILT_HUC         BIT(1)
+#define DWCEQOS_MAC_PKT_FILT_PR          BIT(0)
+
+#define DWCEQOS_MAC_MDIO_ADDR_CR(x)      (((x) & 15) << 8)
+#define DWCEQOS_MAC_MDIO_ADDR_CR_20      2
+#define DWCEQOS_MAC_MDIO_ADDR_CR_35      3
+#define DWCEQOS_MAC_MDIO_ADDR_CR_60      0
+#define DWCEQOS_MAC_MDIO_ADDR_CR_100     1
+#define DWCEQOS_MAC_MDIO_ADDR_CR_150     4
+#define DWCEQOS_MAC_MDIO_ADDR_CR_250     5
+#define DWCEQOS_MAC_MDIO_ADDR_GOC_READ   0x0000000c
+#define DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE  BIT(2)
+#define DWCEQOS_MAC_MDIO_ADDR_GB         BIT(0)
+
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEN  BIT(0)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEX  BIT(1)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEN  BIT(2)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEX  BIT(3)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST  BIT(8)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST  BIT(9)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN   BIT(16)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLS     BIT(17)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLSEN   BIT(18)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA  BIT(19)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE   BIT(20)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE BIT(21)
+
+#define DWCEQOS_MAC_1US_TIC_COUNTER_VAL(x)  ((x) & GENMASK(11, 0))
+
+#define DWCEQOS_LPI_CTRL_ENABLE_EEE      (DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE | \
+                                         DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA | \
+                                         DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN)
+
+#define DWCEQOS_MAC_RX_FLOW_CTRL_RFE BIT(0)
+
+#define DWCEQOS_MAC_Q0_TX_FLOW_TFE   BIT(1)
+#define DWCEQOS_MAC_Q0_TX_FLOW_PT(time)        ((time) << 16)
+#define DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS (0 << 4)
+
+/* Features */
+#define DWCEQOS_MAC_HW_FEATURE0_RXCOESEL BIT(16)
+#define DWCEQOS_MAC_HW_FEATURE0_TXCOESEL BIT(14)
+#define DWCEQOS_MAC_HW_FEATURE0_HDSEL    BIT(2)
+#define DWCEQOS_MAC_HW_FEATURE0_EEESEL   BIT(13)
+#define DWCEQOS_MAC_HW_FEATURE0_GMIISEL  BIT(1)
+#define DWCEQOS_MAC_HW_FEATURE0_MIISEL   BIT(0)
+
+#define DWCEQOS_MAC_HW_FEATURE1_TSOEN    BIT(18)
+#define DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(x) (128 << (((x) & 0x7c0) >> 6))
+#define DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(x)  (128 << ((x) & 0x1f))
+
+#define DWCEQOS_MAX_PERFECT_ADDRESSES(feature1) \
+       (1 + (((feature1) & 0x1fc0000) >> 18))
+
+#define DWCEQOS_MDIO_PHYADDR(x)     (((x) & 0x1f) << 21)
+#define DWCEQOS_MDIO_PHYREG(x)      (((x) & 0x1f) << 16)
+
+#define DWCEQOS_DMA_MODE_SWR            BIT(0)
+
+#define DWCEQOS_DWCEQOS_RX_BUF_SIZE 2048
+
+/* Mac Management Counters */
+#define REG_DWCEQOS_MMC_CTRL             0x0700
+#define REG_DWCEQOS_MMC_RXIRQ            0x0704
+#define REG_DWCEQOS_MMC_TXIRQ            0x0708
+#define REG_DWCEQOS_MMC_RXIRQMASK        0x070c
+#define REG_DWCEQOS_MMC_TXIRQMASK        0x0710
+
+#define DWCEQOS_MMC_CTRL_CNTRST          BIT(0)
+#define DWCEQOS_MMC_CTRL_RSTONRD         BIT(2)
+
+#define DWC_MMC_TXLPITRANSCNTR           0x07F0
+#define DWC_MMC_TXLPIUSCNTR              0x07EC
+#define DWC_MMC_TXOVERSIZE_G             0x0778
+#define DWC_MMC_TXVLANPACKETS_G          0x0774
+#define DWC_MMC_TXPAUSEPACKETS           0x0770
+#define DWC_MMC_TXEXCESSDEF              0x076C
+#define DWC_MMC_TXPACKETCOUNT_G          0x0768
+#define DWC_MMC_TXOCTETCOUNT_G           0x0764
+#define DWC_MMC_TXCARRIERERROR           0x0760
+#define DWC_MMC_TXEXCESSCOL              0x075C
+#define DWC_MMC_TXLATECOL                0x0758
+#define DWC_MMC_TXDEFERRED               0x0754
+#define DWC_MMC_TXMULTICOL_G             0x0750
+#define DWC_MMC_TXSINGLECOL_G            0x074C
+#define DWC_MMC_TXUNDERFLOWERROR         0x0748
+#define DWC_MMC_TXBROADCASTPACKETS_GB    0x0744
+#define DWC_MMC_TXMULTICASTPACKETS_GB    0x0740
+#define DWC_MMC_TXUNICASTPACKETS_GB      0x073C
+#define DWC_MMC_TX1024TOMAXOCTETS_GB     0x0738
+#define DWC_MMC_TX512TO1023OCTETS_GB     0x0734
+#define DWC_MMC_TX256TO511OCTETS_GB      0x0730
+#define DWC_MMC_TX128TO255OCTETS_GB      0x072C
+#define DWC_MMC_TX65TO127OCTETS_GB       0x0728
+#define DWC_MMC_TX64OCTETS_GB            0x0724
+#define DWC_MMC_TXMULTICASTPACKETS_G     0x0720
+#define DWC_MMC_TXBROADCASTPACKETS_G     0x071C
+#define DWC_MMC_TXPACKETCOUNT_GB         0x0718
+#define DWC_MMC_TXOCTETCOUNT_GB          0x0714
+
+#define DWC_MMC_RXLPITRANSCNTR           0x07F8
+#define DWC_MMC_RXLPIUSCNTR              0x07F4
+#define DWC_MMC_RXCTRLPACKETS_G          0x07E4
+#define DWC_MMC_RXRCVERROR               0x07E0
+#define DWC_MMC_RXWATCHDOG               0x07DC
+#define DWC_MMC_RXVLANPACKETS_GB         0x07D8
+#define DWC_MMC_RXFIFOOVERFLOW           0x07D4
+#define DWC_MMC_RXPAUSEPACKETS           0x07D0
+#define DWC_MMC_RXOUTOFRANGETYPE         0x07CC
+#define DWC_MMC_RXLENGTHERROR            0x07C8
+#define DWC_MMC_RXUNICASTPACKETS_G       0x07C4
+#define DWC_MMC_RX1024TOMAXOCTETS_GB     0x07C0
+#define DWC_MMC_RX512TO1023OCTETS_GB     0x07BC
+#define DWC_MMC_RX256TO511OCTETS_GB      0x07B8
+#define DWC_MMC_RX128TO255OCTETS_GB      0x07B4
+#define DWC_MMC_RX65TO127OCTETS_GB       0x07B0
+#define DWC_MMC_RX64OCTETS_GB            0x07AC
+#define DWC_MMC_RXOVERSIZE_G             0x07A8
+#define DWC_MMC_RXUNDERSIZE_G            0x07A4
+#define DWC_MMC_RXJABBERERROR            0x07A0
+#define DWC_MMC_RXRUNTERROR              0x079C
+#define DWC_MMC_RXALIGNMENTERROR         0x0798
+#define DWC_MMC_RXCRCERROR               0x0794
+#define DWC_MMC_RXMULTICASTPACKETS_G     0x0790
+#define DWC_MMC_RXBROADCASTPACKETS_G     0x078C
+#define DWC_MMC_RXOCTETCOUNT_G           0x0788
+#define DWC_MMC_RXOCTETCOUNT_GB          0x0784
+#define DWC_MMC_RXPACKETCOUNT_GB         0x0780
+
+static int debug = 3;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)");
+
+/* DMA ring descriptor. These are used as support descriptors for the HW DMA */
+struct ring_desc {
+       struct sk_buff *skb;
+       dma_addr_t mapping;
+       size_t len;
+};
+
+/* DMA hardware descriptor */
+struct dwceqos_dma_desc {
+       u32     des0;
+       u32     des1;
+       u32     des2;
+       u32     des3;
+} ____cacheline_aligned;
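+
+/* Note: the four-word layout above serves both the read and the write-back
+ * descriptor formats; the interpretation of des0..des3 follows the
+ * RDES/TDES bit definitions earlier in this file.
+ */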
+
+struct dwceqos_mmc_counters {
+       __u64 txlpitranscntr;
+       __u64 txpiuscntr;
+       __u64 txoversize_g;
+       __u64 txvlanpackets_g;
+       __u64 txpausepackets;
+       __u64 txexcessdef;
+       __u64 txpacketcount_g;
+       __u64 txoctetcount_g;
+       __u64 txcarriererror;
+       __u64 txexcesscol;
+       __u64 txlatecol;
+       __u64 txdeferred;
+       __u64 txmulticol_g;
+       __u64 txsinglecol_g;
+       __u64 txunderflowerror;
+       __u64 txbroadcastpackets_gb;
+       __u64 txmulticastpackets_gb;
+       __u64 txunicastpackets_gb;
+       __u64 tx1024tomaxoctets_gb;
+       __u64 tx512to1023octets_gb;
+       __u64 tx256to511octets_gb;
+       __u64 tx128to255octets_gb;
+       __u64 tx65to127octets_gb;
+       __u64 tx64octets_gb;
+       __u64 txmulticastpackets_g;
+       __u64 txbroadcastpackets_g;
+       __u64 txpacketcount_gb;
+       __u64 txoctetcount_gb;
+
+       __u64 rxlpitranscntr;
+       __u64 rxlpiuscntr;
+       __u64 rxctrlpackets_g;
+       __u64 rxrcverror;
+       __u64 rxwatchdog;
+       __u64 rxvlanpackets_gb;
+       __u64 rxfifooverflow;
+       __u64 rxpausepackets;
+       __u64 rxoutofrangetype;
+       __u64 rxlengtherror;
+       __u64 rxunicastpackets_g;
+       __u64 rx1024tomaxoctets_gb;
+       __u64 rx512to1023octets_gb;
+       __u64 rx256to511octets_gb;
+       __u64 rx128to255octets_gb;
+       __u64 rx65to127octets_gb;
+       __u64 rx64octets_gb;
+       __u64 rxoversize_g;
+       __u64 rxundersize_g;
+       __u64 rxjabbererror;
+       __u64 rxrunterror;
+       __u64 rxalignmenterror;
+       __u64 rxcrcerror;
+       __u64 rxmulticastpackets_g;
+       __u64 rxbroadcastpackets_g;
+       __u64 rxoctetcount_g;
+       __u64 rxoctetcount_gb;
+       __u64 rxpacketcount_gb;
+};
+
+/* Ethtool statistics */
+
+struct dwceqos_stat {
+       const char stat_name[ETH_GSTRING_LEN];
+       int   offset;
+};
+
+#define STAT_ITEM(name, var) \
+       {\
+               name,\
+               offsetof(struct dwceqos_mmc_counters, var),\
+       }
+
+static const struct dwceqos_stat dwceqos_ethtool_stats[] = {
+       STAT_ITEM("tx_bytes", txoctetcount_gb),
+       STAT_ITEM("tx_packets", txpacketcount_gb),
+       STAT_ITEM("tx_unicst_packets", txunicastpackets_gb),
+       STAT_ITEM("tx_broadcast_packets", txbroadcastpackets_gb),
+       STAT_ITEM("tx_multicast_packets",  txmulticastpackets_gb),
+       STAT_ITEM("tx_pause_packets", txpausepackets),
+       STAT_ITEM("tx_up_to_64_byte_packets", tx64octets_gb),
+       STAT_ITEM("tx_65_to_127_byte_packets",  tx65to127octets_gb),
+       STAT_ITEM("tx_128_to_255_byte_packets", tx128to255octets_gb),
+       STAT_ITEM("tx_256_to_511_byte_packets", tx256to511octets_gb),
+       STAT_ITEM("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
+       STAT_ITEM("tx_1024_to_maxsize_packets", tx1024tomaxoctets_gb),
+       STAT_ITEM("tx_underflow_errors", txunderflowerror),
+       STAT_ITEM("tx_lpi_count", txlpitranscntr),
+
+       STAT_ITEM("rx_bytes", rxoctetcount_gb),
+       STAT_ITEM("rx_packets", rxpacketcount_gb),
+       STAT_ITEM("rx_unicast_packets", rxunicastpackets_g),
+       STAT_ITEM("rx_broadcast_packets", rxbroadcastpackets_g),
+       STAT_ITEM("rx_multicast_packets", rxmulticastpackets_g),
+       STAT_ITEM("rx_vlan_packets", rxvlanpackets_gb),
+       STAT_ITEM("rx_pause_packets", rxpausepackets),
+       STAT_ITEM("rx_up_to_64_byte_packets", rx64octets_gb),
+       STAT_ITEM("rx_65_to_127_byte_packets",  rx65to127octets_gb),
+       STAT_ITEM("rx_128_to_255_byte_packets", rx128to255octets_gb),
+       STAT_ITEM("rx_256_to_511_byte_packets", rx256to511octets_gb),
+       STAT_ITEM("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
+       STAT_ITEM("rx_1024_to_maxsize_packets", rx1024tomaxoctets_gb),
+       STAT_ITEM("rx_fifo_overflow_errors", rxfifooverflow),
+       STAT_ITEM("rx_oversize_packets", rxoversize_g),
+       STAT_ITEM("rx_undersize_packets", rxundersize_g),
+       STAT_ITEM("rx_jabbers", rxjabbererror),
+       STAT_ITEM("rx_align_errors", rxalignmenterror),
+       STAT_ITEM("rx_crc_errors", rxcrcerror),
+       STAT_ITEM("rx_lpi_count", rxlpitranscntr),
+};
+
+/* Configuration of AXI bus parameters.
+ * These values depend on the parameters set on the MAC core as well
+ * as the AXI interconnect.
+ */
+struct dwceqos_bus_cfg {
+       /* Enable AXI low-power interface. */
+       bool en_lpi;
+       /* Limit on number of outstanding AXI write requests. */
+       u32 write_requests;
+       /* Limit on number of outstanding AXI read requests. */
+       u32 read_requests;
+       /* Bitmap of allowed AXI burst lengths, 4-256 beats. */
+       u32 burst_map;
+       /* DMA Programmable burst length*/
+       u32 tx_pbl;
+       u32 rx_pbl;
+};
+
+struct dwceqos_flowcontrol {
+       int autoneg;
+       int rx;
+       int rx_current;
+       int tx;
+       int tx_current;
+};
+
+struct net_local {
+       void __iomem *baseaddr;
+       struct clk *phy_ref_clk;
+       struct clk *apb_pclk;
+
+       struct device_node *phy_node;
+       struct net_device *ndev;
+       struct platform_device *pdev;
+
+       u32 msg_enable;
+
+       struct tasklet_struct tx_bdreclaim_tasklet;
+       struct workqueue_struct *txtimeout_handler_wq;
+       struct work_struct txtimeout_reinit;
+
+       phy_interface_t phy_interface;
+       struct phy_device *phy_dev;
+       struct mii_bus *mii_bus;
+
+       unsigned int link;
+       unsigned int speed;
+       unsigned int duplex;
+
+       struct napi_struct napi;
+
+       /* DMA Descriptor Areas */
+       struct ring_desc *rx_skb;
+       struct ring_desc *tx_skb;
+
+       struct dwceqos_dma_desc *tx_descs;
+       struct dwceqos_dma_desc *rx_descs;
+
+       /* DMA Mapped Descriptor areas*/
+       dma_addr_t tx_descs_addr;
+       dma_addr_t rx_descs_addr;
+       dma_addr_t tx_descs_tail_addr;
+       dma_addr_t rx_descs_tail_addr;
+
+       size_t tx_free;
+       size_t tx_next;
+       size_t rx_cur;
+       size_t tx_cur;
+
+       /* Spinlocks for accessing DMA Descriptors */
+       spinlock_t tx_lock;
+
+       /* Spinlock for register read-modify-writes. */
+       spinlock_t hw_lock;
+
+       u32 feature0;
+       u32 feature1;
+       u32 feature2;
+
+       struct dwceqos_bus_cfg bus_cfg;
+       bool en_tx_lpi_clockgating;
+
+       int eee_enabled;
+       int eee_active;
+       int csr_val;
+       u32 gso_size;
+
+       struct dwceqos_mmc_counters mmc_counters;
+       /* Protect the mmc_counter updates. */
+       spinlock_t stats_lock;
+       u32 mmc_rx_counters_mask;
+       u32 mmc_tx_counters_mask;
+
+       struct dwceqos_flowcontrol flowcontrol;
+};
+
+static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
+                                     u32 tx_mask);
+
+static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
+                                 unsigned int reg_n);
+static int dwceqos_stop(struct net_device *ndev);
+static int dwceqos_open(struct net_device *ndev);
+static void dwceqos_tx_poll_demand(struct net_local *lp);
+
+static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable);
+static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable);
+
+static void dwceqos_reset_state(struct net_local *lp);
+
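+/* MMIO accessors. The _relaxed variants include no memory barriers, so
+ * callers that publish buffers to the DMA engine must provide their own
+ * ordering.
+ */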
+#define dwceqos_read(lp, reg)                                          \
+       readl_relaxed(((void __iomem *)((lp)->baseaddr)) + (reg))
+#define dwceqos_write(lp, reg, val)                                    \
+       writel_relaxed((val), ((void __iomem *)((lp)->baseaddr)) + (reg))
+
+static void dwceqos_reset_state(struct net_local *lp)
+{
+       lp->link    = 0;
+       lp->speed   = 0;
+       lp->duplex  = DUPLEX_UNKNOWN;
+       lp->flowcontrol.rx_current = 0;
+       lp->flowcontrol.tx_current = 0;
+       lp->eee_active = 0;
+       lp->eee_enabled = 0;
+}
+
+static void print_descriptor(struct net_local *lp, int index, int tx)
+{
+       struct dwceqos_dma_desc *dd;
+
+       if (tx)
+               dd = &lp->tx_descs[index];
+       else
+               dd = &lp->rx_descs[index];
+
+       pr_info("%s DMA Descriptor #%d@%p Contents:\n", tx ? "TX" : "RX",
+               index, dd);
+       pr_info("0x%08x 0x%08x 0x%08x 0x%08x\n", dd->des0, dd->des1, dd->des2,
+               dd->des3);
+}
+
+static void print_status(struct net_local *lp)
+{
+       size_t desci, i;
+
+       pr_info("tx_free %zu, tx_cur %zu, tx_next %zu\n", lp->tx_free,
+               lp->tx_cur, lp->tx_next);
+
+       print_descriptor(lp, lp->rx_cur, 0);
+
+       for (desci = (lp->tx_cur - 10) % DWCEQOS_TX_DCNT, i = 0;
+                i < DWCEQOS_TX_DCNT;
+                ++i) {
+               print_descriptor(lp, desci, 1);
+               desci = (desci + 1) % DWCEQOS_TX_DCNT;
+       }
+
+       pr_info("DMA_Debug_Status0:          0x%08x\n",
+               dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0));
+       pr_info("DMA_CH0_Status:             0x%08x\n",
+               dwceqos_read(lp, REG_DWCEQOS_DMA_IS));
+       pr_info("DMA_CH0_Current_App_TxDesc: 0x%08x\n",
+               dwceqos_read(lp, 0x1144));
+       pr_info("DMA_CH0_Current_App_TxBuff: 0x%08x\n",
+               dwceqos_read(lp, 0x1154));
+       pr_info("MTL_Debug_Status:      0x%08x\n",
+               dwceqos_read(lp, REG_DWCEQOS_MTL_DEBUG_ST));
+       pr_info("MTL_TXQ0_Debug_Status: 0x%08x\n",
+               dwceqos_read(lp, REG_DWCEQOS_MTL_TXQ0_DEBUG_ST));
+       pr_info("MTL_RXQ0_Debug_Status: 0x%08x\n",
+               dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_DEBUG_ST));
+       pr_info("Current TX DMA: 0x%08x, RX DMA: 0x%08x\n",
+               dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_TXDESC),
+               dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_RXDESC));
+}
+
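+/* Derive the MDIO clock-select (CSR) divider from the APB clock rate so
+ * MDC stays within its nominal 2.5 MHz limit; values follow the CR field
+ * encodings above. Rates above 250 MHz leave csr_val unchanged.
+ */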
+static void dwceqos_mdio_set_csr(struct net_local *lp)
+{
+       int rate = clk_get_rate(lp->apb_pclk);
+
+       if (rate <= 20000000)
+               lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_20;
+       else if (rate <= 35000000)
+               lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_35;
+       else if (rate <= 60000000)
+               lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_60;
+       else if (rate <= 100000000)
+               lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_100;
+       else if (rate <= 150000000)
+               lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_150;
+       else if (rate <= 250000000)
+               lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_250;
+}
+
+/* Simple MDIO functions implementing mii_bus */
+static int dwceqos_mdio_read(struct mii_bus *bus, int mii_id, int phyreg)
+{
+       struct net_local *lp = bus->priv;
+       u32 regval;
+       int i;
+       int data;
+
+       regval = DWCEQOS_MDIO_PHYADDR(mii_id) |
+               DWCEQOS_MDIO_PHYREG(phyreg) |
+               DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
+               DWCEQOS_MAC_MDIO_ADDR_GB |
+               DWCEQOS_MAC_MDIO_ADDR_GOC_READ;
+       dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval);
+
+       for (i = 0; i < 5; ++i) {
+               usleep_range(64, 128);
+               if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
+                     DWCEQOS_MAC_MDIO_ADDR_GB))
+                       break;
+       }
+
+       data = dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_DATA);
+       if (i == 5) {
+               netdev_warn(lp->ndev, "MDIO read timed out\n");
+               data = 0xffff;
+       }
+
+       return data & 0xffff;
+}
+
+static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg,
+                             u16 value)
+{
+       struct net_local *lp = bus->priv;
+       u32 regval;
+       int i;
+
+       dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_DATA, value);
+
+       regval = DWCEQOS_MDIO_PHYADDR(mii_id) |
+               DWCEQOS_MDIO_PHYREG(phyreg) |
+               DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
+               DWCEQOS_MAC_MDIO_ADDR_GB |
+               DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE;
+       dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval);
+
+       for (i = 0; i < 5; ++i) {
+               usleep_range(64, 128);
+               if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
+                     DWCEQOS_MAC_MDIO_ADDR_GB))
+                       break;
+       }
+       if (i == 5)
+               netdev_warn(lp->ndev, "MDIO write timed out\n");
+       return 0;
+}
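+
+/* These two callbacks presumably back a registered struct mii_bus set up
+ * elsewhere in the driver (not shown in this hunk), along the lines of
+ * bus->read = dwceqos_mdio_read, bus->write = dwceqos_mdio_write,
+ * bus->priv = lp, followed by of_mdiobus_register().
+ */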
+
+static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       struct phy_device *phydev = lp->phy_dev;
+
+       if (!netif_running(ndev))
+               return -EINVAL;
+
+       if (!phydev)
+               return -ENODEV;
+
+       switch (cmd) {
+       case SIOCGMIIPHY:
+       case SIOCGMIIREG:
+       case SIOCSMIIREG:
+               return phy_mii_ioctl(phydev, rq, cmd);
+       default:
+               dev_info(&lp->pdev->dev, "ioctl %X not implemented.\n", cmd);
+               return -EOPNOTSUPP;
+       }
+}
+
+static void dwceqos_link_down(struct net_local *lp)
+{
+       u32 regval;
+       unsigned long flags;
+
+       /* Indicate link down to the LPI state machine */
+       spin_lock_irqsave(&lp->hw_lock, flags);
+       regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+       regval &= ~DWCEQOS_MAC_LPI_CTRL_STATUS_PLS;
+       dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+       spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_link_up(struct net_local *lp)
+{
+       u32 regval;
+       unsigned long flags;
+
+       /* Indicate link up to the LPI state machine */
+       spin_lock_irqsave(&lp->hw_lock, flags);
+       regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+       regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_PLS;
+       dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+       spin_unlock_irqrestore(&lp->hw_lock, flags);
+
+       lp->eee_active = !phy_init_eee(lp->phy_dev, 0);
+
+       /* Check for changed EEE capability */
+       if (!lp->eee_active && lp->eee_enabled) {
+               lp->eee_enabled = 0;
+
+               spin_lock_irqsave(&lp->hw_lock, flags);
+               regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+               regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
+               dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+               spin_unlock_irqrestore(&lp->hw_lock, flags);
+       }
+}
+
+static void dwceqos_set_speed(struct net_local *lp)
+{
+       struct phy_device *phydev = lp->phy_dev;
+       u32 regval;
+
+       regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+       regval &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES |
+                   DWCEQOS_MAC_CFG_DM);
+
+       if (phydev->duplex)
+               regval |= DWCEQOS_MAC_CFG_DM;
+       if (phydev->speed == SPEED_10) {
+               regval |= DWCEQOS_MAC_CFG_PS;
+       } else if (phydev->speed == SPEED_100) {
+               regval |= DWCEQOS_MAC_CFG_PS |
+                       DWCEQOS_MAC_CFG_FES;
+       } else if (phydev->speed != SPEED_1000) {
+               netdev_err(lp->ndev,
+                          "unknown PHY speed %d\n",
+                          phydev->speed);
+               return;
+       }
+
+       dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, regval);
+}
+
+static void dwceqos_adjust_link(struct net_device *ndev)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       struct phy_device *phydev = lp->phy_dev;
+       int status_change = 0;
+
+       if (phydev->link) {
+               if ((lp->speed != phydev->speed) ||
+                   (lp->duplex != phydev->duplex)) {
+                       dwceqos_set_speed(lp);
+
+                       lp->speed = phydev->speed;
+                       lp->duplex = phydev->duplex;
+                       status_change = 1;
+               }
+
+               if (lp->flowcontrol.autoneg) {
+                       lp->flowcontrol.rx = phydev->pause ||
+                                            phydev->asym_pause;
+                       lp->flowcontrol.tx = phydev->pause ||
+                                            phydev->asym_pause;
+               }
+
+               if (lp->flowcontrol.rx != lp->flowcontrol.rx_current) {
+                       if (netif_msg_link(lp))
+                               netdev_dbg(ndev, "set rx flow to %d\n",
+                                          lp->flowcontrol.rx);
+                       dwceqos_set_rx_flowcontrol(lp, lp->flowcontrol.rx);
+                       lp->flowcontrol.rx_current = lp->flowcontrol.rx;
+               }
+               if (lp->flowcontrol.tx != lp->flowcontrol.tx_current) {
+                       if (netif_msg_link(lp))
+                               netdev_dbg(ndev, "set tx flow to %d\n",
+                                          lp->flowcontrol.tx);
+                       dwceqos_set_tx_flowcontrol(lp, lp->flowcontrol.tx);
+                       lp->flowcontrol.tx_current = lp->flowcontrol.tx;
+               }
+       }
+
+       if (phydev->link != lp->link) {
+               lp->link = phydev->link;
+               status_change = 1;
+       }
+
+       if (status_change) {
+               if (phydev->link) {
+                       lp->ndev->trans_start = jiffies;
+                       dwceqos_link_up(lp);
+               } else {
+                       dwceqos_link_down(lp);
+               }
+               phy_print_status(phydev);
+       }
+}
+
+static int dwceqos_mii_probe(struct net_device *ndev)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       struct phy_device *phydev = NULL;
+
+       if (lp->phy_node) {
+               phydev = of_phy_connect(lp->ndev,
+                                       lp->phy_node,
+                                       &dwceqos_adjust_link,
+                                       0,
+                                       lp->phy_interface);
+
+               if (!phydev) {
+                       netdev_err(ndev, "no PHY found\n");
+                       return -ENODEV;
+               }
+       } else {
+               netdev_err(ndev, "no PHY configured\n");
+               return -ENODEV;
+       }
+
+       if (netif_msg_probe(lp))
+               netdev_dbg(lp->ndev,
+                          "phydev %p, phydev->phy_id 0xa%x, phydev->addr 0x%x\n",
+                          phydev, phydev->phy_id, phydev->addr);
+
+       phydev->supported &= PHY_GBIT_FEATURES;
+
+       lp->link    = 0;
+       lp->speed   = 0;
+       lp->duplex  = DUPLEX_UNKNOWN;
+       lp->phy_dev = phydev;
+
+       if (netif_msg_probe(lp)) {
+               netdev_dbg(lp->ndev, "phy_addr 0x%x, phy_id 0x%08x\n",
+                          lp->phy_dev->addr, lp->phy_dev->phy_id);
+
+               netdev_dbg(lp->ndev, "attach [%s] phy driver\n",
+                          lp->phy_dev->drv->name);
+       }
+
+       return 0;
+}
+
+static void dwceqos_alloc_rxring_desc(struct net_local *lp, int index)
+{
+       struct sk_buff *new_skb;
+       dma_addr_t new_skb_baddr = 0;
+
+       new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE);
+       if (!new_skb) {
+               netdev_err(lp->ndev, "alloc_skb error for desc %d\n", index);
+               goto err_out;
+       }
+
+       new_skb_baddr = dma_map_single(lp->ndev->dev.parent,
+                                      new_skb->data, DWCEQOS_RX_BUF_SIZE,
+                                      DMA_FROM_DEVICE);
+       if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
+               netdev_err(lp->ndev, "DMA map error\n");
+               dev_kfree_skb(new_skb);
+               new_skb = NULL;
+               goto err_out;
+       }
+
+       lp->rx_descs[index].des0 = new_skb_baddr;
+       lp->rx_descs[index].des1 = 0;
+       lp->rx_descs[index].des2 = 0;
+       lp->rx_descs[index].des3 = DWCEQOS_DMA_RDES3_INTE |
+                                  DWCEQOS_DMA_RDES3_BUF1V |
+                                  DWCEQOS_DMA_RDES3_OWN;
+
+       lp->rx_skb[index].mapping = new_skb_baddr;
+       lp->rx_skb[index].len = DWCEQOS_RX_BUF_SIZE;
+
+err_out:
+       lp->rx_skb[index].skb = new_skb;
+}
+
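+/* Unmap and free every buffer still attached to the RX and TX rings. The
+ * rings may be partially initialized, so this is safe to call from the
+ * descriptor allocation error path.
+ */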
+static void dwceqos_clean_rings(struct net_local *lp)
+{
+       int i;
+
+       if (lp->rx_skb) {
+               for (i = 0; i < DWCEQOS_RX_DCNT; i++) {
+                       if (lp->rx_skb[i].skb) {
+                               dma_unmap_single(lp->ndev->dev.parent,
+                                                lp->rx_skb[i].mapping,
+                                                lp->rx_skb[i].len,
+                                                DMA_FROM_DEVICE);
+
+                               dev_kfree_skb(lp->rx_skb[i].skb);
+                               lp->rx_skb[i].skb = NULL;
+                               lp->rx_skb[i].mapping = 0;
+                       }
+               }
+       }
+
+       if (lp->tx_skb) {
+               for (i = 0; i < DWCEQOS_TX_DCNT; i++) {
+                       if (lp->tx_skb[i].skb) {
+                               dev_kfree_skb(lp->tx_skb[i].skb);
+                               lp->tx_skb[i].skb = NULL;
+                       }
+                       if (lp->tx_skb[i].mapping) {
+                               dma_unmap_single(lp->ndev->dev.parent,
+                                                lp->tx_skb[i].mapping,
+                                                lp->tx_skb[i].len,
+                                                DMA_TO_DEVICE);
+                               lp->tx_skb[i].mapping = 0;
+                       }
+               }
+       }
+}
+
+static void dwceqos_descriptor_free(struct net_local *lp)
+{
+       int size;
+
+       dwceqos_clean_rings(lp);
+
+       kfree(lp->tx_skb);
+       lp->tx_skb = NULL;
+       kfree(lp->rx_skb);
+       lp->rx_skb = NULL;
+
+       size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
+       if (lp->rx_descs) {
+               dma_free_coherent(lp->ndev->dev.parent, size,
+                                 (void *)(lp->rx_descs), lp->rx_descs_addr);
+               lp->rx_descs = NULL;
+       }
+
+       size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
+       if (lp->tx_descs) {
+               dma_free_coherent(lp->ndev->dev.parent, size,
+                                 (void *)(lp->tx_descs), lp->tx_descs_addr);
+               lp->tx_descs = NULL;
+       }
+}
+
+static int dwceqos_descriptor_init(struct net_local *lp)
+{
+       int size;
+       u32 i;
+
+       lp->gso_size = 0;
+
+       lp->tx_skb = NULL;
+       lp->rx_skb = NULL;
+       lp->rx_descs = NULL;
+       lp->tx_descs = NULL;
+
+       /* Reset the DMA indexes */
+       lp->rx_cur = 0;
+       lp->tx_cur = 0;
+       lp->tx_next = 0;
+       lp->tx_free = DWCEQOS_TX_DCNT;
+
+       /* Allocate Ring descriptors */
+       size = DWCEQOS_RX_DCNT * sizeof(struct ring_desc);
+       lp->rx_skb = kzalloc(size, GFP_KERNEL);
+       if (!lp->rx_skb)
+               goto err_out;
+
+       size = DWCEQOS_TX_DCNT * sizeof(struct ring_desc);
+       lp->tx_skb = kzalloc(size, GFP_KERNEL);
+       if (!lp->tx_skb)
+               goto err_out;
+
+       /* Allocate DMA descriptors */
+       size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
+       lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
+                       &lp->rx_descs_addr, GFP_KERNEL);
+       if (!lp->rx_descs)
+               goto err_out;
+       lp->rx_descs_tail_addr = lp->rx_descs_addr +
+               sizeof(struct dwceqos_dma_desc) * DWCEQOS_RX_DCNT;
+
+       size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
+       lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
+                       &lp->tx_descs_addr, GFP_KERNEL);
+       if (!lp->tx_descs)
+               goto err_out;
+       lp->tx_descs_tail_addr = lp->tx_descs_addr +
+               sizeof(struct dwceqos_dma_desc) * DWCEQOS_TX_DCNT;
+
+       /* Initialize RX Ring Descriptors and buffers */
+       for (i = 0; i < DWCEQOS_RX_DCNT; ++i) {
+               dwceqos_alloc_rxring_desc(lp, i);
+               if (!lp->rx_skb[i].skb)
+                       goto err_out;
+       }
+
+       /* Initialize TX Descriptors */
+       for (i = 0; i < DWCEQOS_TX_DCNT; ++i) {
+               lp->tx_descs[i].des0 = 0;
+               lp->tx_descs[i].des1 = 0;
+               lp->tx_descs[i].des2 = 0;
+               lp->tx_descs[i].des3 = 0;
+       }
+
+       /* Make descriptor writes visible to the DMA. */
+       wmb();
+
+       return 0;
+
+err_out:
+       dwceqos_descriptor_free(lp);
+       return -ENOMEM;
+}
+
+static int dwceqos_packet_avail(struct net_local *lp)
+{
+       return !(lp->rx_descs[lp->rx_cur].des3 & DWCEQOS_DMA_RDES3_OWN);
+}
+
+static void dwceqos_get_hwfeatures(struct net_local *lp)
+{
+       lp->feature0 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE0);
+       lp->feature1 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE1);
+       lp->feature2 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE2);
+}
+
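+/* The helpers below toggle the per-direction DMA interrupt enables. The
+ * DMA_CH0_IE register is shared between RX and TX, so each read-modify-write
+ * is serialized with hw_lock.
+ */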
+static void dwceqos_dma_enable_txirq(struct net_local *lp)
+{
+       u32 regval;
+       unsigned long flags;
+
+       spin_lock_irqsave(&lp->hw_lock, flags);
+       regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+       regval |= DWCEQOS_DMA_CH0_IE_TIE;
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+       spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_disable_txirq(struct net_local *lp)
+{
+       u32 regval;
+       unsigned long flags;
+
+       spin_lock_irqsave(&lp->hw_lock, flags);
+       regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+       regval &= ~DWCEQOS_DMA_CH0_IE_TIE;
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+       spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_enable_rxirq(struct net_local *lp)
+{
+       u32 regval;
+       unsigned long flags;
+
+       spin_lock_irqsave(&lp->hw_lock, flags);
+       regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+       regval |= DWCEQOS_DMA_CH0_IE_RIE;
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+       spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_disable_rxirq(struct net_local *lp)
+{
+       u32 regval;
+       unsigned long flags;
+
+       spin_lock_irqsave(&lp->hw_lock, flags);
+       regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+       regval &= ~DWCEQOS_DMA_CH0_IE_RIE;
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+       spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_enable_mmc_interrupt(struct net_local *lp)
+{
+       dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, 0);
+       dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, 0);
+}
+
+static int dwceqos_mii_init(struct net_local *lp)
+{
+       int ret = -ENXIO, i;
+       struct resource res;
+       struct device_node *mdionode;
+
+       mdionode = of_get_child_by_name(lp->pdev->dev.of_node, "mdio");
+
+       if (!mdionode)
+               return 0;
+
+       lp->mii_bus = mdiobus_alloc();
+       if (!lp->mii_bus) {
+               ret = -ENOMEM;
+               goto err_out;
+       }
+
+       lp->mii_bus->name  = "DWCEQOS MII bus";
+       lp->mii_bus->read  = &dwceqos_mdio_read;
+       lp->mii_bus->write = &dwceqos_mdio_write;
+       lp->mii_bus->priv = lp;
+       lp->mii_bus->parent = &lp->ndev->dev;
+
+       lp->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int),
+                                        GFP_KERNEL);
+       if (!lp->mii_bus->irq) {
+               ret = -ENOMEM;
+               goto err_out_free_mdiobus;
+       }
+
+       for (i = 0; i < PHY_MAX_ADDR; i++)
+               lp->mii_bus->irq[i] = PHY_POLL;
+       of_address_to_resource(lp->pdev->dev.of_node, 0, &res);
+       snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
+                (unsigned long long)res.start);
+       ret = of_mdiobus_register(lp->mii_bus, mdionode);
+       if (ret)
+               goto err_out_free_mdio_irq;
+
+       /* The mdio node reference is no longer needed once registered. */
+       of_node_put(mdionode);
+       return 0;
+
+err_out_free_mdio_irq:
+       kfree(lp->mii_bus->irq);
+err_out_free_mdiobus:
+       mdiobus_free(lp->mii_bus);
+err_out:
+       of_node_put(mdionode);
+       return ret;
+}
+
+/* DMA reset. When issued, it also resets all MTL and MAC registers. */
+static void dwceqos_reset_hw(struct net_local *lp)
+{
+       /* Wait (at most) 0.5 seconds for the DMA reset to complete */
+       int i = 5000;
+       u32 reg;
+
+       /* Force gigabit to guarantee a TX clock for GMII. */
+       reg = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+       reg &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES);
+       reg |= DWCEQOS_MAC_CFG_DM;
+       dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, reg);
+
+       dwceqos_write(lp, REG_DWCEQOS_DMA_MODE, DWCEQOS_DMA_MODE_SWR);
+
+       do {
+               udelay(100);
+               i--;
+               reg = dwceqos_read(lp, REG_DWCEQOS_DMA_MODE);
+       } while ((reg & DWCEQOS_DMA_MODE_SWR) && i);
+       /* We might experience a timeout if the chip clock mux is broken */
+       if (!i)
+               netdev_err(lp->ndev, "DMA reset timed out!\n");
+}
+
+static void dwceqos_fatal_bus_error(struct net_local *lp, u32 dma_status)
+{
+       if (dma_status & DWCEQOS_DMA_CH0_IS_TEB) {
+               netdev_err(lp->ndev, "txdma bus error %s %s (status=%08x)\n",
+                          dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_READ ?
+                               "read" : "write",
+                          dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR ?
+                               "descr" : "data",
+                          dma_status);
+
+               print_status(lp);
+       }
+       if (dma_status & DWCEQOS_DMA_CH0_IS_REB) {
+               netdev_err(lp->ndev, "rxdma bus error %s %s (status=%08x)\n",
+                          dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_READ ?
+                               "read" : "write",
+                          dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR ?
+                               "descr" : "data",
+                          dma_status);
+
+               print_status(lp);
+       }
+}
+
+static void dwceqos_mmc_interrupt(struct net_local *lp)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&lp->stats_lock, flags);
+
+       /* A latched MMC interrupt cannot be masked; we must read
+        * all counters that have an interrupt pending.
+        */
+       dwceqos_read_mmc_counters(lp,
+                                 dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQ),
+                                 dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQ));
+
+       spin_unlock_irqrestore(&lp->stats_lock, flags);
+}
+
+static void dwceqos_mac_interrupt(struct net_local *lp)
+{
+       u32 cause;
+
+       cause = dwceqos_read(lp, REG_DWCEQOS_MAC_IS);
+
+       if (cause & DWCEQOS_MAC_IS_MMC_INT)
+               dwceqos_mmc_interrupt(lp);
+}
+
+static irqreturn_t dwceqos_interrupt(int irq, void *dev_id)
+{
+       struct net_device *ndev = dev_id;
+       struct net_local *lp = netdev_priv(ndev);
+
+       u32 cause;
+       u32 dma_status;
+       irqreturn_t ret = IRQ_NONE;
+
+       cause = dwceqos_read(lp, REG_DWCEQOS_DMA_IS);
+       /* DMA Channel 0 Interrupt */
+       if (cause & DWCEQOS_DMA_IS_DC0IS) {
+               dma_status = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_STA);
+
+               /* Transmit Interrupt */
+               if (dma_status & DWCEQOS_DMA_CH0_IS_TI) {
+                       tasklet_schedule(&lp->tx_bdreclaim_tasklet);
+                       dwceqos_dma_disable_txirq(lp);
+               }
+
+               /* Receive Interrupt */
+               if (dma_status & DWCEQOS_DMA_CH0_IS_RI) {
+                       /* Disable RX IRQs */
+                       dwceqos_dma_disable_rxirq(lp);
+                       napi_schedule(&lp->napi);
+               }
+
+               /* Fatal Bus Error interrupt */
+               if (unlikely(dma_status & DWCEQOS_DMA_CH0_IS_FBE)) {
+                       dwceqos_fatal_bus_error(lp, dma_status);
+
+                       /* errata 9000831707 */
+                       dma_status |= DWCEQOS_DMA_CH0_IS_TEB |
+                                     DWCEQOS_DMA_CH0_IS_REB;
+               }
+
+               /* Ack all DMA Channel 0 IRQs */
+               dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, dma_status);
+               ret = IRQ_HANDLED;
+       }
+
+       if (cause & DWCEQOS_DMA_IS_MTLIS) {
+               u32 val = dwceqos_read(lp, REG_DWCEQOS_MTL_Q0_ISCTRL);
+
+               dwceqos_write(lp, REG_DWCEQOS_MTL_Q0_ISCTRL, val);
+               ret = IRQ_HANDLED;
+       }
+
+       if (cause & DWCEQOS_DMA_IS_MACIS) {
+               dwceqos_mac_interrupt(lp);
+               ret = IRQ_HANDLED;
+       }
+       return ret;
+}
+
+static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable)
+{
+       u32 regval;
+       unsigned long flags;
+
+       spin_lock_irqsave(&lp->hw_lock, flags);
+
+       regval = dwceqos_read(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL);
+       if (enable)
+               regval |= DWCEQOS_MAC_RX_FLOW_CTRL_RFE;
+       else
+               regval &= ~DWCEQOS_MAC_RX_FLOW_CTRL_RFE;
+       dwceqos_write(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL, regval);
+
+       spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable)
+{
+       u32 regval;
+       unsigned long flags;
+
+       spin_lock_irqsave(&lp->hw_lock, flags);
+
+       /* MTL flow control */
+       regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER);
+       if (enable)
+               regval |= DWCEQOS_MTL_RXQ_EHFC;
+       else
+               regval &= ~DWCEQOS_MTL_RXQ_EHFC;
+
+       dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+
+       /* MAC flow control */
+       regval = dwceqos_read(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW);
+       if (enable)
+               regval |= DWCEQOS_MAC_Q0_TX_FLOW_TFE;
+       else
+               regval &= ~DWCEQOS_MAC_Q0_TX_FLOW_TFE;
+       dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval);
+
+       spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_configure_flow_control(struct net_local *lp)
+{
+       u32 regval;
+       unsigned long flags;
+       int RQS, RFD, RFA;
+
+       spin_lock_irqsave(&lp->hw_lock, flags);
+
+       regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER);
+
+       /* The queue size is in units of 256 bytes. We want 512-byte units
+        * for the threshold fields.
+        */
+       RQS = ((regval >> 20) & 0x3FF) + 1;
+       RQS /= 2;
+
+       /* The thresholds are relative to a full queue, with a bias
+        * of 1 KiByte below full.
+        */
+       RFD = RQS / 2 - 2;
+       RFA = RQS / 8 - 2;
+
+       regval = (regval & 0xFFF000FF) | (RFD << 14) | (RFA << 8);
+
+       if (RFD >= 0 && RFA >= 0) {
+               dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+       } else {
+               netdev_warn(lp->ndev,
+                           "FIFO too small for flow control.\n");
+       }
+
+       regval = DWCEQOS_MAC_Q0_TX_FLOW_PT(256) |
+                DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS;
+
+       dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval);
+
+       spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
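+/* Program the 1 us tick counter from the APB clock rate; the MAC derives
+ * its internal microsecond-based timers from this value.
+ */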
+static void dwceqos_configure_clock(struct net_local *lp)
+{
+       unsigned long rate_mhz = clk_get_rate(lp->apb_pclk) / 1000000;
+
+       BUG_ON(!rate_mhz);
+
+       dwceqos_write(lp,
+                     REG_DWCEQOS_MAC_1US_TIC_COUNTER,
+                     DWCEQOS_MAC_1US_TIC_COUNTER_VAL(rate_mhz - 1));
+}
+
+static void dwceqos_configure_bus(struct net_local *lp)
+{
+       u32 sysbus_reg;
+
+       /* N.B. We do not support the Fixed Burst mode because it
+        * opens a race window by making HW access to DMA descriptors
+        * non-atomic.
+        */
+
+       sysbus_reg = DWCEQOS_DMA_SYSBUS_MODE_AAL;
+
+       if (lp->bus_cfg.en_lpi)
+               sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_EN_LPI;
+
+       if (lp->bus_cfg.burst_map)
+               sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST(
+                       lp->bus_cfg.burst_map);
+       else
+               sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST(
+                       DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT);
+
+       if (lp->bus_cfg.read_requests)
+               sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
+                       lp->bus_cfg.read_requests - 1);
+       else
+               sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
+                       DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT);
+
+       if (lp->bus_cfg.write_requests)
+               sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
+                       lp->bus_cfg.write_requests - 1);
+       else
+               sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
+                       DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT);
+
+       if (netif_msg_hw(lp))
+               netdev_dbg(lp->ndev, "SysbusMode %#X\n", sysbus_reg);
+
+       dwceqos_write(lp, REG_DWCEQOS_DMA_SYSBUS_MODE, sysbus_reg);
+}
+
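+/* Bring the controller into an operational state: soft-reset, configure
+ * the system bus, probe the data bus width, program the descriptor rings,
+ * set up the MTL queues and the MAC, then enable interrupts and start the
+ * DMA engines.
+ */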
+static void dwceqos_init_hw(struct net_local *lp)
+{
+       u32 regval;
+       u32 buswidth;
+       u32 dma_skip;
+
+       /* Software reset */
+       dwceqos_reset_hw(lp);
+
+       dwceqos_configure_bus(lp);
+
+       /* Probe data bus width, 32/64/128 bits. */
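+       /* The DMA engine aligns the descriptor tail pointer to the bus
+        * width, so the low bits of the written 0xF read back as zero;
+        * (readback ^ 0xF) + 1 then yields the width in bytes.
+        */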
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, 0xF);
+       regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL);
+       buswidth = (regval ^ 0xF) + 1;
+
+       /* Cache-align dma descriptors. */
+       dma_skip = (sizeof(struct dwceqos_dma_desc) - 16) / buswidth;
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_CTRL,
+                     DWCEQOS_DMA_CH_CTRL_DSL(dma_skip) |
+                     DWCEQOS_DMA_CH_CTRL_PBLX8);
+
+       /* Initialize DMA Channel 0 */
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LEN, DWCEQOS_TX_DCNT - 1);
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LEN, DWCEQOS_RX_DCNT - 1);
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LIST,
+                     (u32)lp->tx_descs_addr);
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LIST,
+                     (u32)lp->rx_descs_addr);
+
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
+                     lp->tx_descs_tail_addr);
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
+                     lp->rx_descs_tail_addr);
+
+       if (lp->bus_cfg.tx_pbl)
+               regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.tx_pbl);
+       else
+               regval = DWCEQOS_DMA_CH_CTRL_PBL(2);
+
+       /* Enable TSO if the HW supports it */
+       if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
+               regval |= DWCEQOS_DMA_CH_TX_TSE;
+
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, regval);
+
+       if (lp->bus_cfg.rx_pbl)
+               regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.rx_pbl);
+       else
+               regval = DWCEQOS_DMA_CH_CTRL_PBL(2);
+
+       regval |= DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(DWCEQOS_DWCEQOS_RX_BUF_SIZE);
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval);
+
+       regval |= DWCEQOS_DMA_CH_CTRL_START;
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval);
+
+       /* Initialize MTL Queues */
+       regval = DWCEQOS_MTL_SCHALG_STRICT;
+       dwceqos_write(lp, REG_DWCEQOS_MTL_OPER, regval);
+
+       regval = DWCEQOS_MTL_TXQ_SIZE(
+                       DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(lp->feature1)) |
+               DWCEQOS_MTL_TXQ_TXQEN | DWCEQOS_MTL_TXQ_TSF |
+               DWCEQOS_MTL_TXQ_TTC512;
+       dwceqos_write(lp, REG_DWCEQOS_MTL_TXQ0_OPER, regval);
+
+       regval = DWCEQOS_MTL_RXQ_SIZE(
+                       DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(lp->feature1)) |
+               DWCEQOS_MTL_RXQ_FUP | DWCEQOS_MTL_RXQ_FEP | DWCEQOS_MTL_RXQ_RSF;
+       dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+
+       dwceqos_configure_flow_control(lp);
+
+       /* Initialize MAC */
+       dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+
+       lp->eee_enabled = 0;
+
+       dwceqos_configure_clock(lp);
+
+       /* MMC counters */
+
+       /* probe implemented counters */
+       dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, ~0u);
+       dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, ~0u);
+       lp->mmc_rx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQMASK);
+       lp->mmc_tx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQMASK);
+
+       dwceqos_write(lp, REG_DWCEQOS_MMC_CTRL, DWCEQOS_MMC_CTRL_CNTRST |
+               DWCEQOS_MMC_CTRL_RSTONRD);
+       dwceqos_enable_mmc_interrupt(lp);
+
+       /* Enable Interrupts */
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
+                     DWCEQOS_DMA_CH0_IE_NIE |
+                     DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
+                     DWCEQOS_DMA_CH0_IE_AIE |
+                     DWCEQOS_DMA_CH0_IE_FBEE);
+
+       dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0);
+
+       dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC |
+               DWCEQOS_MAC_CFG_DM | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
+
+       /* Start TX DMA */
+       regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL);
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL,
+                     regval | DWCEQOS_DMA_CH_CTRL_START);
+
+       /* Enable MAC TX/RX */
+       regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+       dwceqos_write(lp, REG_DWCEQOS_MAC_CFG,
+                     regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
+}
+
+static void dwceqos_tx_reclaim(unsigned long data)
+{
+       struct net_device *ndev = (struct net_device *)data;
+       struct net_local *lp = netdev_priv(ndev);
+       unsigned int tx_bytes = 0;
+       unsigned int tx_packets = 0;
+
+       spin_lock(&lp->tx_lock);
+
+       while (lp->tx_free < DWCEQOS_TX_DCNT) {
+               struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_cur];
+               struct ring_desc *rd = &lp->tx_skb[lp->tx_cur];
+
+               /* Descriptor still held by the DMA? */
+               if (dd->des3 & DWCEQOS_DMA_TDES3_OWN)
+                       break;
+
+               if (rd->mapping)
+                       dma_unmap_single(ndev->dev.parent, rd->mapping, rd->len,
+                                        DMA_TO_DEVICE);
+
+               if (unlikely(rd->skb)) {
+                       ++tx_packets;
+                       tx_bytes += rd->skb->len;
+                       dev_consume_skb_any(rd->skb);
+               }
+
+               rd->skb = NULL;
+               rd->mapping = 0;
+               lp->tx_free++;
+               lp->tx_cur = (lp->tx_cur + 1) % DWCEQOS_TX_DCNT;
+
+               if ((dd->des3 & DWCEQOS_DMA_TDES3_LD) &&
+                   (dd->des3 & DWCEQOS_DMA_RDES3_ES)) {
+                       if (netif_msg_tx_err(lp))
+                               netdev_err(ndev, "TX Error, TDES3 = 0x%x\n",
+                                          dd->des3);
+                       if (netif_msg_hw(lp))
+                               print_status(lp);
+               }
+       }
+       spin_unlock(&lp->tx_lock);
+
+       netdev_completed_queue(ndev, tx_packets, tx_bytes);
+
+       dwceqos_dma_enable_txirq(lp);
+       netif_wake_queue(ndev);
+}
+
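+/* NAPI receive processing. A replacement buffer is allocated and mapped
+ * before a descriptor is consumed, so the ring never loses a buffer: if
+ * the allocation fails, the packet is left in place for a later poll.
+ */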
+static int dwceqos_rx(struct net_local *lp, int budget)
+{
+       struct sk_buff *skb;
+       u32 tot_size = 0;
+       unsigned int n_packets = 0;
+       unsigned int n_descs = 0;
+       u32 len;
+
+       struct dwceqos_dma_desc *dd;
+       struct sk_buff *new_skb;
+       dma_addr_t new_skb_baddr = 0;
+
+       while (n_descs < budget) {
+               if (!dwceqos_packet_avail(lp))
+                       break;
+
+               new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE);
+               if (!new_skb) {
+                       netdev_err(lp->ndev, "no memory for new sk_buff\n");
+                       break;
+               }
+
+               /* Get dma handle of skb->data */
+               new_skb_baddr = (u32)dma_map_single(lp->ndev->dev.parent,
+                                       new_skb->data,
+                                       DWCEQOS_RX_BUF_SIZE,
+                                       DMA_FROM_DEVICE);
+               if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
+                       netdev_err(lp->ndev, "DMA map error\n");
+                       dev_kfree_skb(new_skb);
+                       break;
+               }
+
+               /* Read descriptor data after reading owner bit. */
+               dma_rmb();
+
+               dd = &lp->rx_descs[lp->rx_cur];
+               len = DWCEQOS_DMA_RDES3_PL(dd->des3);
+               skb = lp->rx_skb[lp->rx_cur].skb;
+
+               /* Unmap old buffer */
+               dma_unmap_single(lp->ndev->dev.parent,
+                                lp->rx_skb[lp->rx_cur].mapping,
+                                lp->rx_skb[lp->rx_cur].len, DMA_FROM_DEVICE);
+
+               /* Discard packet on reception error or bad checksum */
+               if ((dd->des3 & DWCEQOS_DMA_RDES3_ES) ||
+                   (dd->des1 & DWCEQOS_DMA_RDES1_IPCE)) {
+                       dev_kfree_skb(skb);
+                       skb = NULL;
+               } else {
+                       skb_put(skb, len);
+                       skb->protocol = eth_type_trans(skb, lp->ndev);
+                       switch (dd->des1 & DWCEQOS_DMA_RDES1_PT) {
+                       case DWCEQOS_DMA_RDES1_PT_UDP:
+                       case DWCEQOS_DMA_RDES1_PT_TCP:
+                       case DWCEQOS_DMA_RDES1_PT_ICMP:
+                               skb->ip_summed = CHECKSUM_UNNECESSARY;
+                               break;
+                       default:
+                               skb->ip_summed = CHECKSUM_NONE;
+                               break;
+                       }
+               }
+
+               if (unlikely(!skb)) {
+                       if (netif_msg_rx_err(lp))
+                               netdev_dbg(lp->ndev, "rx error: des3=%X\n",
+                                          lp->rx_descs[lp->rx_cur].des3);
+               } else {
+                       tot_size += skb->len;
+                       n_packets++;
+
+                       netif_receive_skb(skb);
+               }
+
+               lp->rx_descs[lp->rx_cur].des0 = new_skb_baddr;
+               lp->rx_descs[lp->rx_cur].des1 = 0;
+               lp->rx_descs[lp->rx_cur].des2 = 0;
+               /* The DMA must observe des0/1/2 written before des3. */
+               wmb();
+               lp->rx_descs[lp->rx_cur].des3 = DWCEQOS_DMA_RDES3_INTE |
+                                               DWCEQOS_DMA_RDES3_OWN  |
+                                               DWCEQOS_DMA_RDES3_BUF1V;
+
+               lp->rx_skb[lp->rx_cur].mapping = new_skb_baddr;
+               lp->rx_skb[lp->rx_cur].len = DWCEQOS_RX_BUF_SIZE;
+               lp->rx_skb[lp->rx_cur].skb = new_skb;
+
+               n_descs++;
+               lp->rx_cur = (lp->rx_cur + 1) % DWCEQOS_RX_DCNT;
+       }
+
+       /* Make sure any ownership update is written to the descriptors before
+        * DMA wakeup.
+        */
+       wmb();
+
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, DWCEQOS_DMA_CH0_IS_RI);
+       /* Wake up RX by writing tail pointer */
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
+                     lp->rx_descs_tail_addr);
+
+       return n_descs;
+}
+
+static int dwceqos_rx_poll(struct napi_struct *napi, int budget)
+{
+       struct net_local *lp = container_of(napi, struct net_local, napi);
+       int work_done;
+
+       work_done = dwceqos_rx(lp, budget);
+
+       if (!dwceqos_packet_avail(lp) && work_done < budget) {
+               napi_complete(napi);
+               dwceqos_dma_enable_rxirq(lp);
+       } else {
+               work_done = budget;
+       }
+
+       return work_done;
+}
+
+/* Reinitialize the hardware after a TX timeout */
+static void dwceqos_reinit_for_txtimeout(struct work_struct *data)
+{
+       struct net_local *lp = container_of(data, struct net_local,
+               txtimeout_reinit);
+
+       netdev_err(lp->ndev, "transmit timeout %d s, resetting...\n",
+                  DWCEQOS_TX_TIMEOUT);
+
+       if (netif_msg_hw(lp))
+               print_status(lp);
+
+       rtnl_lock();
+       dwceqos_stop(lp->ndev);
+       dwceqos_open(lp->ndev);
+       rtnl_unlock();
+}
+
+/* DT Probing function called by main probe */
+static inline int dwceqos_probe_config_dt(struct platform_device *pdev)
+{
+       struct net_device *ndev;
+       struct net_local *lp;
+       const void *mac_address;
+       struct dwceqos_bus_cfg *bus_cfg;
+       struct device_node *np = pdev->dev.of_node;
+
+       ndev = platform_get_drvdata(pdev);
+       lp = netdev_priv(ndev);
+       bus_cfg = &lp->bus_cfg;
+
+       /* Set the MAC address. */
+       mac_address = of_get_mac_address(pdev->dev.of_node);
+       if (mac_address)
+               ether_addr_copy(ndev->dev_addr, mac_address);
+
+       /* These are all optional parameters */
+       lp->en_tx_lpi_clockgating =  of_property_read_bool(np,
+               "snps,en-tx-lpi-clockgating");
+       bus_cfg->en_lpi = of_property_read_bool(np, "snps,en-lpi");
+       of_property_read_u32(np, "snps,write-requests",
+                            &bus_cfg->write_requests);
+       of_property_read_u32(np, "snps,read-requests", &bus_cfg->read_requests);
+       of_property_read_u32(np, "snps,burst-map", &bus_cfg->burst_map);
+       of_property_read_u32(np, "snps,txpbl", &bus_cfg->tx_pbl);
+       of_property_read_u32(np, "snps,rxpbl", &bus_cfg->rx_pbl);
+
+       netdev_dbg(ndev, "BusCfg: lpi:%u wr:%u rr:%u bm:%X rxpbl:%u txpbl:%u\n",
+                  bus_cfg->en_lpi,
+                  bus_cfg->write_requests,
+                  bus_cfg->read_requests,
+                  bus_cfg->burst_map,
+                  bus_cfg->rx_pbl,
+                  bus_cfg->tx_pbl);
+
+       return 0;
+}
+
+static int dwceqos_open(struct net_device *ndev)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       int res;
+
+       dwceqos_reset_state(lp);
+       res = dwceqos_descriptor_init(lp);
+       if (res) {
+               netdev_err(ndev, "Unable to allocate DMA memory, rc %d\n", res);
+               return res;
+       }
+       netdev_reset_queue(ndev);
+
+       napi_enable(&lp->napi);
+       phy_start(lp->phy_dev);
+       dwceqos_init_hw(lp);
+
+       netif_start_queue(ndev);
+       tasklet_enable(&lp->tx_bdreclaim_tasklet);
+
+       return 0;
+}
+
+static bool dwceqos_is_tx_dma_suspended(struct net_local *lp)
+{
+       u32 reg;
+
+       reg = dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0);
+       reg = DMA_GET_TX_STATE_CH0(reg);
+
+       return reg == DMA_TX_CH_SUSPENDED;
+}
+
+static void dwceqos_drain_dma(struct net_local *lp)
+{
+       /* Wait for all pending TX buffers to be sent. Upper limit based
+        * on max frame size on a 10 Mbit link.
+        */
+       size_t limit = (DWCEQOS_TX_DCNT * 1250) / 100;
+
+       while (!dwceqos_is_tx_dma_suspended(lp) && limit--)
+               usleep_range(100, 200);
+}
+
+static int dwceqos_stop(struct net_device *ndev)
+{
+       struct net_local *lp = netdev_priv(ndev);
+
+       phy_stop(lp->phy_dev);
+
+       tasklet_disable(&lp->tx_bdreclaim_tasklet);
+       netif_stop_queue(ndev);
+       napi_disable(&lp->napi);
+
+       dwceqos_drain_dma(lp);
+
+       netif_tx_lock(lp->ndev);
+       dwceqos_reset_hw(lp);
+       dwceqos_descriptor_free(lp);
+       netif_tx_unlock(lp->ndev);
+
+       return 0;
+}
+
+static void dwceqos_dmadesc_set_ctx(struct net_local *lp,
+                                   unsigned short gso_size)
+{
+       struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_next];
+
+       dd->des0 = 0;
+       dd->des1 = 0;
+       dd->des2 = gso_size;
+       dd->des3 = DWCEQOS_DMA_TDES3_CTXT | DWCEQOS_DMA_TDES3_TCMSSV;
+
+       lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+}
+
+static void dwceqos_tx_poll_demand(struct net_local *lp)
+{
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
+                     lp->tx_descs_tail_addr);
+}
+
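+/* Bookkeeping for a multi-descriptor transmit, kept so that a DMA mapping
+ * failure halfway through the packet can be rolled back cleanly.
+ */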
+struct dwceqos_tx {
+       size_t nr_descriptors;
+       size_t initial_descriptor;
+       size_t last_descriptor;
+       size_t prev_gso_size;
+       size_t network_header_len;
+};
+
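+/* Count the descriptors this packet will need: one for the linear part,
+ * an extra context descriptor when the GSO size changes, and one per
+ * BYTES_PER_DMA_DESC chunk of every fragment.
+ */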
+static void dwceqos_tx_prepare(struct sk_buff *skb, struct net_local *lp,
+                              struct dwceqos_tx *tx)
+{
+       size_t n = 1;
+       size_t i;
+
+       if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size)
+               ++n;
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+               n += (skb_frag_size(frag) + BYTES_PER_DMA_DESC - 1) /
+                    BYTES_PER_DMA_DESC;
+       }
+
+       tx->nr_descriptors = n;
+       tx->initial_descriptor = lp->tx_next;
+       tx->last_descriptor = lp->tx_next;
+       tx->prev_gso_size = lp->gso_size;
+
+       tx->network_header_len = skb_transport_offset(skb);
+       if (skb_is_gso(skb))
+               tx->network_header_len += tcp_hdrlen(skb);
+}
+
+static int dwceqos_tx_linear(struct sk_buff *skb, struct net_local *lp,
+                            struct dwceqos_tx *tx)
+{
+       struct ring_desc *rd;
+       struct dwceqos_dma_desc *dd;
+       size_t payload_len;
+       dma_addr_t dma_handle;
+
+       if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) {
+               dwceqos_dmadesc_set_ctx(lp, skb_shinfo(skb)->gso_size);
+               lp->gso_size = skb_shinfo(skb)->gso_size;
+       }
+
+       dma_handle = dma_map_single(lp->ndev->dev.parent, skb->data,
+                                   skb_headlen(skb), DMA_TO_DEVICE);
+
+       if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
+               netdev_err(lp->ndev, "TX DMA Mapping error\n");
+               return -ENOMEM;
+       }
+
+       rd = &lp->tx_skb[lp->tx_next];
+       dd = &lp->tx_descs[lp->tx_next];
+
+       rd->skb = NULL;
+       rd->len = skb_headlen(skb);
+       rd->mapping = dma_handle;
+
+       /* Set up DMA Descriptor */
+       dd->des0 = dma_handle;
+
+       if (skb_is_gso(skb)) {
+               payload_len = skb_headlen(skb) - tx->network_header_len;
+
+               if (payload_len)
+                       dd->des1 = dma_handle + tx->network_header_len;
+               dd->des2 = tx->network_header_len |
+                       DWCEQOS_DMA_DES2_B2L(payload_len);
+               dd->des3 = DWCEQOS_DMA_TDES3_TSE |
+                       DWCEQOS_DMA_DES3_THL((tcp_hdrlen(skb) / 4)) |
+                       (skb->len - tx->network_header_len);
+       } else {
+               dd->des1 = 0;
+               dd->des2 = skb_headlen(skb);
+               dd->des3 = skb->len;
+
+               switch (skb->ip_summed) {
+               case CHECKSUM_PARTIAL:
+                       dd->des3 |= DWCEQOS_DMA_TDES3_CA;
+                       /* fall through */
+               case CHECKSUM_NONE:
+               case CHECKSUM_UNNECESSARY:
+               case CHECKSUM_COMPLETE:
+               default:
+                       break;
+               }
+       }
+
+       dd->des3 |= DWCEQOS_DMA_TDES3_FD;
+       if (lp->tx_next != tx->initial_descriptor)
+               dd->des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+       tx->last_descriptor = lp->tx_next;
+       lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+
+       return 0;
+}
+
+static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp,
+                           struct dwceqos_tx *tx)
+{
+       struct ring_desc *rd = NULL;
+       struct dwceqos_dma_desc *dd;
+       dma_addr_t dma_handle;
+       size_t i;
+
+       /* Set up more ring and DMA descriptors if the packet is fragmented */
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+               size_t frag_size;
+               size_t consumed_size;
+
+               /* Map DMA Area */
+               dma_handle = skb_frag_dma_map(lp->ndev->dev.parent, frag, 0,
+                                             skb_frag_size(frag),
+                                             DMA_TO_DEVICE);
+               if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
+                       netdev_err(lp->ndev, "DMA Mapping error\n");
+                       return -ENOMEM;
+               }
+
+               /* order-3 fragments span more than one descriptor. */
+               frag_size = skb_frag_size(frag);
+               consumed_size = 0;
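+               /* 16376 is presumably the largest 8-byte-aligned length
+                * that fits the descriptor buffer-length field; larger
+                * fragments are split across several descriptors.
+                */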
+               while (consumed_size < frag_size) {
+                       size_t dma_size = min_t(size_t, 16376,
+                                               frag_size - consumed_size);
+
+                       rd = &lp->tx_skb[lp->tx_next];
+                       memset(rd, 0, sizeof(*rd));
+
+                       dd = &lp->tx_descs[lp->tx_next];
+
+                       /* Set DMA Descriptor fields */
+                       dd->des0 = dma_handle;
+                       dd->des1 = 0;
+                       dd->des2 = dma_size;
+
+                       if (skb_is_gso(skb))
+                               dd->des3 = (skb->len - tx->network_header_len);
+                       else
+                               dd->des3 = skb->len;
+
+                       dd->des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+                       tx->last_descriptor = lp->tx_next;
+                       lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+                       consumed_size += dma_size;
+               }
+
+               rd->len = skb_frag_size(frag);
+               rd->mapping = dma_handle;
+       }
+
+       return 0;
+}
+
+static void dwceqos_tx_finalize(struct sk_buff *skb, struct net_local *lp,
+                               struct dwceqos_tx *tx)
+{
+       lp->tx_descs[tx->last_descriptor].des3 |= DWCEQOS_DMA_TDES3_LD;
+       lp->tx_descs[tx->last_descriptor].des2 |= DWCEQOS_DMA_TDES2_IOC;
+
+       lp->tx_skb[tx->last_descriptor].skb = skb;
+
+       /* Make all descriptor updates visible to the DMA before setting the
+        * owner bit.
+        */
+       wmb();
+
+       lp->tx_descs[tx->initial_descriptor].des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+       /* Make the owner bit visible before TX wakeup. */
+       wmb();
+
+       dwceqos_tx_poll_demand(lp);
+}
+
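+/* Undo a partially built transmit: unmap everything from the initial
+ * descriptor up to tx_next, clear those descriptors and restore the
+ * saved GSO state.
+ */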
+static void dwceqos_tx_rollback(struct net_local *lp, struct dwceqos_tx *tx)
+{
+       size_t i = tx->initial_descriptor;
+
+       while (i != lp->tx_next) {
+               if (lp->tx_skb[i].mapping)
+                       dma_unmap_single(lp->ndev->dev.parent,
+                                        lp->tx_skb[i].mapping,
+                                        lp->tx_skb[i].len,
+                                        DMA_TO_DEVICE);
+
+               lp->tx_skb[i].mapping = 0;
+               lp->tx_skb[i].skb = NULL;
+
+               memset(&lp->tx_descs[i], 0, sizeof(lp->tx_descs[i]));
+
+               i = (i + 1) % DWCEQOS_TX_DCNT;
+       }
+
+       lp->tx_next = tx->initial_descriptor;
+       lp->gso_size = tx->prev_gso_size;
+}
+
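+/* ndo_start_xmit: reserve ring space, build the linear and fragment
+ * descriptors, hand ownership to the DMA, then account the queued bytes
+ * for byte queue limits.
+ */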
+static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       struct dwceqos_tx trans;
+       int err;
+
+       dwceqos_tx_prepare(skb, lp, &trans);
+       if (lp->tx_free < trans.nr_descriptors) {
+               netif_stop_queue(ndev);
+               return NETDEV_TX_BUSY;
+       }
+
+       err = dwceqos_tx_linear(skb, lp, &trans);
+       if (err)
+               goto tx_error;
+
+       err = dwceqos_tx_frags(skb, lp, &trans);
+       if (err)
+               goto tx_error;
+
+       WARN_ON(lp->tx_next !=
+               ((trans.initial_descriptor + trans.nr_descriptors) %
+                DWCEQOS_TX_DCNT));
+
+       dwceqos_tx_finalize(skb, lp, &trans);
+
+       netdev_sent_queue(ndev, skb->len);
+
+       spin_lock_bh(&lp->tx_lock);
+       lp->tx_free -= trans.nr_descriptors;
+       spin_unlock_bh(&lp->tx_lock);
+
+       ndev->trans_start = jiffies;
+       return NETDEV_TX_OK;
+
+tx_error:
+       dwceqos_tx_rollback(lp, &trans);
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
+}
+
+/* Set MAC address and then update HW accordingly */
+static int dwceqos_set_mac_address(struct net_device *ndev, void *addr)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       struct sockaddr *hwaddr = (struct sockaddr *)addr;
+
+       if (netif_running(ndev))
+               return -EBUSY;
+
+       if (!is_valid_ether_addr(hwaddr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       memcpy(ndev->dev_addr, hwaddr->sa_data, ndev->addr_len);
+
+       dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+       return 0;
+}
+
+static void dwceqos_tx_timeout(struct net_device *ndev)
+{
+       struct net_local *lp = netdev_priv(ndev);
+
+       queue_work(lp->txtimeout_handler_wq, &lp->txtimeout_reinit);
+}
+
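+/* Program perfect-match MAC address register @reg_n; the high word also
+ * carries the address-enable bit.
+ */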
+static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
+                                 unsigned int reg_n)
+{
+       unsigned long data;
+
+       data = (addr[5] << 8) | addr[4];
+       dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n),
+                     data | DWCEQOS_MAC_MAC_ADDR_HI_EN);
+       data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+       dwceqos_write(lp, DWCEQOS_ADDR_LOW(reg_n), data);
+}
+
+static void dwceqos_disable_umac_addr(struct net_local *lp, unsigned int reg_n)
+{
+       /* Do not disable MAC address 0 */
+       if (reg_n != 0)
+               dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), 0);
+}
+
+static void dwceqos_set_rx_mode(struct net_device *ndev)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       u32 regval = 0;
+       u32 mc_filter[2];
+       int reg = 1;
+       struct netdev_hw_addr *ha;
+       unsigned int max_mac_addr;
+
+       max_mac_addr = DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1);
+
+       if (ndev->flags & IFF_PROMISC) {
+               regval = DWCEQOS_MAC_PKT_FILT_PR;
+       } else if (((netdev_mc_count(ndev) > DWCEQOS_HASH_TABLE_SIZE) ||
+                               (ndev->flags & IFF_ALLMULTI))) {
+               regval = DWCEQOS_MAC_PKT_FILT_PM;
+               dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, 0xffffffff);
+               dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, 0xffffffff);
+       } else if (!netdev_mc_empty(ndev)) {
+               regval = DWCEQOS_MAC_PKT_FILT_HMC;
+               memset(mc_filter, 0, sizeof(mc_filter));
+               netdev_for_each_mc_addr(ha, ndev) {
+                       /* The upper 6 bits of the calculated CRC are used to
+                        * index the contents of the hash table
+                        */
+                       int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
+                       /* The most significant bit determines the register
+                        * to use (H/L) while the other 5 bits determine
+                        * the bit within the register.
+                        */
+                       mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+               }
+               dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, mc_filter[0]);
+               dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, mc_filter[1]);
+       }
+       if (netdev_uc_count(ndev) > max_mac_addr) {
+               regval |= DWCEQOS_MAC_PKT_FILT_PR;
+       } else {
+               netdev_for_each_uc_addr(ha, ndev) {
+                       dwceqos_set_umac_addr(lp, ha->addr, reg);
+                       reg++;
+               }
+               for (; reg < DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1); reg++)
+                       dwceqos_disable_umac_addr(lp, reg);
+       }
+       dwceqos_write(lp, REG_DWCEQOS_MAC_PKT_FILT, regval);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void dwceqos_poll_controller(struct net_device *ndev)
+{
+       disable_irq(ndev->irq);
+       dwceqos_interrupt(ndev->irq, ndev);
+       enable_irq(ndev->irq);
+}
+#endif
+
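+/* Accumulate the hardware MMC counters into the software totals. Each
+ * BIT(n) in the masks selects one latched counter; the counters clear on
+ * read (MMC_CTRL_RSTONRD), so every pending counter must be read here or
+ * its value is lost.
+ */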
+static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
+                                     u32 tx_mask)
+{
+       if (tx_mask & BIT(27))
+               lp->mmc_counters.txlpitranscntr +=
+                       dwceqos_read(lp, DWC_MMC_TXLPITRANSCNTR);
+       if (tx_mask & BIT(26))
+               lp->mmc_counters.txpiuscntr +=
+                       dwceqos_read(lp, DWC_MMC_TXLPIUSCNTR);
+       if (tx_mask & BIT(25))
+               lp->mmc_counters.txoversize_g +=
+                       dwceqos_read(lp, DWC_MMC_TXOVERSIZE_G);
+       if (tx_mask & BIT(24))
+               lp->mmc_counters.txvlanpackets_g +=
+                       dwceqos_read(lp, DWC_MMC_TXVLANPACKETS_G);
+       if (tx_mask & BIT(23))
+               lp->mmc_counters.txpausepackets +=
+                       dwceqos_read(lp, DWC_MMC_TXPAUSEPACKETS);
+       if (tx_mask & BIT(22))
+               lp->mmc_counters.txexcessdef +=
+                       dwceqos_read(lp, DWC_MMC_TXEXCESSDEF);
+       if (tx_mask & BIT(21))
+               lp->mmc_counters.txpacketcount_g +=
+                       dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_G);
+       if (tx_mask & BIT(20))
+               lp->mmc_counters.txoctetcount_g +=
+                       dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_G);
+       if (tx_mask & BIT(19))
+               lp->mmc_counters.txcarriererror +=
+                       dwceqos_read(lp, DWC_MMC_TXCARRIERERROR);
+       if (tx_mask & BIT(18))
+               lp->mmc_counters.txexcesscol +=
+                       dwceqos_read(lp, DWC_MMC_TXEXCESSCOL);
+       if (tx_mask & BIT(17))
+               lp->mmc_counters.txlatecol +=
+                       dwceqos_read(lp, DWC_MMC_TXLATECOL);
+       if (tx_mask & BIT(16))
+               lp->mmc_counters.txdeferred +=
+                       dwceqos_read(lp, DWC_MMC_TXDEFERRED);
+       if (tx_mask & BIT(15))
+               lp->mmc_counters.txmulticol_g +=
+                       dwceqos_read(lp, DWC_MMC_TXMULTICOL_G);
+       if (tx_mask & BIT(14))
+               lp->mmc_counters.txsinglecol_g +=
+                       dwceqos_read(lp, DWC_MMC_TXSINGLECOL_G);
+       if (tx_mask & BIT(13))
+               lp->mmc_counters.txunderflowerror +=
+                       dwceqos_read(lp, DWC_MMC_TXUNDERFLOWERROR);
+       if (tx_mask & BIT(12))
+               lp->mmc_counters.txbroadcastpackets_gb +=
+                       dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_GB);
+       if (tx_mask & BIT(11))
+               lp->mmc_counters.txmulticastpackets_gb +=
+                       dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_GB);
+       if (tx_mask & BIT(10))
+               lp->mmc_counters.txunicastpackets_gb +=
+                       dwceqos_read(lp, DWC_MMC_TXUNICASTPACKETS_GB);
+       if (tx_mask & BIT(9))
+               lp->mmc_counters.tx1024tomaxoctets_gb +=
+                       dwceqos_read(lp, DWC_MMC_TX1024TOMAXOCTETS_GB);
+       if (tx_mask & BIT(8))
+               lp->mmc_counters.tx512to1023octets_gb +=
+                       dwceqos_read(lp, DWC_MMC_TX512TO1023OCTETS_GB);
+       if (tx_mask & BIT(7))
+               lp->mmc_counters.tx256to511octets_gb +=
+                       dwceqos_read(lp, DWC_MMC_TX256TO511OCTETS_GB);
+       if (tx_mask & BIT(6))
+               lp->mmc_counters.tx128to255octets_gb +=
+                       dwceqos_read(lp, DWC_MMC_TX128TO255OCTETS_GB);
+       if (tx_mask & BIT(5))
+               lp->mmc_counters.tx65to127octets_gb +=
+                       dwceqos_read(lp, DWC_MMC_TX65TO127OCTETS_GB);
+       if (tx_mask & BIT(4))
+               lp->mmc_counters.tx64octets_gb +=
+                       dwceqos_read(lp, DWC_MMC_TX64OCTETS_GB);
+       if (tx_mask & BIT(3))
+               lp->mmc_counters.txmulticastpackets_g +=
+                       dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_G);
+       if (tx_mask & BIT(2))
+               lp->mmc_counters.txbroadcastpackets_g +=
+                       dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_G);
+       if (tx_mask & BIT(1))
+               lp->mmc_counters.txpacketcount_gb +=
+                       dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_GB);
+       if (tx_mask & BIT(0))
+               lp->mmc_counters.txoctetcount_gb +=
+                       dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_GB);
+
+       if (rx_mask & BIT(27))
+               lp->mmc_counters.rxlpitranscntr +=
+                       dwceqos_read(lp, DWC_MMC_RXLPITRANSCNTR);
+       if (rx_mask & BIT(26))
+               lp->mmc_counters.rxlpiuscntr +=
+                       dwceqos_read(lp, DWC_MMC_RXLPIUSCNTR);
+       if (rx_mask & BIT(25))
+               lp->mmc_counters.rxctrlpackets_g +=
+                       dwceqos_read(lp, DWC_MMC_RXCTRLPACKETS_G);
+       if (rx_mask & BIT(24))
+               lp->mmc_counters.rxrcverror +=
+                       dwceqos_read(lp, DWC_MMC_RXRCVERROR);
+       if (rx_mask & BIT(23))
+               lp->mmc_counters.rxwatchdog +=
+                       dwceqos_read(lp, DWC_MMC_RXWATCHDOG);
+       if (rx_mask & BIT(22))
+               lp->mmc_counters.rxvlanpackets_gb +=
+                       dwceqos_read(lp, DWC_MMC_RXVLANPACKETS_GB);
+       if (rx_mask & BIT(21))
+               lp->mmc_counters.rxfifooverflow +=
+                       dwceqos_read(lp, DWC_MMC_RXFIFOOVERFLOW);
+       if (rx_mask & BIT(20))
+               lp->mmc_counters.rxpausepackets +=
+                       dwceqos_read(lp, DWC_MMC_RXPAUSEPACKETS);
+       if (rx_mask & BIT(19))
+               lp->mmc_counters.rxoutofrangetype +=
+                       dwceqos_read(lp, DWC_MMC_RXOUTOFRANGETYPE);
+       if (rx_mask & BIT(18))
+               lp->mmc_counters.rxlengtherror +=
+                       dwceqos_read(lp, DWC_MMC_RXLENGTHERROR);
+       if (rx_mask & BIT(17))
+               lp->mmc_counters.rxunicastpackets_g +=
+                       dwceqos_read(lp, DWC_MMC_RXUNICASTPACKETS_G);
+       if (rx_mask & BIT(16))
+               lp->mmc_counters.rx1024tomaxoctets_gb +=
+                       dwceqos_read(lp, DWC_MMC_RX1024TOMAXOCTETS_GB);
+       if (rx_mask & BIT(15))
+               lp->mmc_counters.rx512to1023octets_gb +=
+                       dwceqos_read(lp, DWC_MMC_RX512TO1023OCTETS_GB);
+       if (rx_mask & BIT(14))
+               lp->mmc_counters.rx256to511octets_gb +=
+                       dwceqos_read(lp, DWC_MMC_RX256TO511OCTETS_GB);
+       if (rx_mask & BIT(13))
+               lp->mmc_counters.rx128to255octets_gb +=
+                       dwceqos_read(lp, DWC_MMC_RX128TO255OCTETS_GB);
+       if (rx_mask & BIT(12))
+               lp->mmc_counters.rx65to127octets_gb +=
+                       dwceqos_read(lp, DWC_MMC_RX65TO127OCTETS_GB);
+       if (rx_mask & BIT(11))
+               lp->mmc_counters.rx64octets_gb +=
+                       dwceqos_read(lp, DWC_MMC_RX64OCTETS_GB);
+       if (rx_mask & BIT(10))
+               lp->mmc_counters.rxoversize_g +=
+                       dwceqos_read(lp, DWC_MMC_RXOVERSIZE_G);
+       if (rx_mask & BIT(9))
+               lp->mmc_counters.rxundersize_g +=
+                       dwceqos_read(lp, DWC_MMC_RXUNDERSIZE_G);
+       if (rx_mask & BIT(8))
+               lp->mmc_counters.rxjabbererror +=
+                       dwceqos_read(lp, DWC_MMC_RXJABBERERROR);
+       if (rx_mask & BIT(7))
+               lp->mmc_counters.rxrunterror +=
+                       dwceqos_read(lp, DWC_MMC_RXRUNTERROR);
+       if (rx_mask & BIT(6))
+               lp->mmc_counters.rxalignmenterror +=
+                       dwceqos_read(lp, DWC_MMC_RXALIGNMENTERROR);
+       if (rx_mask & BIT(5))
+               lp->mmc_counters.rxcrcerror +=
+                       dwceqos_read(lp, DWC_MMC_RXCRCERROR);
+       if (rx_mask & BIT(4))
+               lp->mmc_counters.rxmulticastpackets_g +=
+                       dwceqos_read(lp, DWC_MMC_RXMULTICASTPACKETS_G);
+       if (rx_mask & BIT(3))
+               lp->mmc_counters.rxbroadcastpackets_g +=
+                       dwceqos_read(lp, DWC_MMC_RXBROADCASTPACKETS_G);
+       if (rx_mask & BIT(2))
+               lp->mmc_counters.rxoctetcount_g +=
+                       dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_G);
+       if (rx_mask & BIT(1))
+               lp->mmc_counters.rxoctetcount_gb +=
+                       dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_GB);
+       if (rx_mask & BIT(0))
+               lp->mmc_counters.rxpacketcount_gb +=
+                       dwceqos_read(lp, DWC_MMC_RXPACKETCOUNT_GB);
+}
+
+static struct rtnl_link_stats64*
+dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s)
+{
+       unsigned long flags;
+       struct net_local *lp = netdev_priv(ndev);
+       struct dwceqos_mmc_counters *hwstats = &lp->mmc_counters;
+
+       spin_lock_irqsave(&lp->stats_lock, flags);
+       dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
+                                 lp->mmc_tx_counters_mask);
+       spin_unlock_irqrestore(&lp->stats_lock, flags);
+
+       s->rx_packets = hwstats->rxpacketcount_gb;
+       s->rx_bytes = hwstats->rxoctetcount_gb;
+       s->rx_errors = hwstats->rxpacketcount_gb -
+               hwstats->rxbroadcastpackets_g -
+               hwstats->rxmulticastpackets_g -
+               hwstats->rxunicastpackets_g;
+       s->multicast = hwstats->rxmulticastpackets_g;
+       s->rx_length_errors = hwstats->rxlengtherror;
+       s->rx_crc_errors = hwstats->rxcrcerror;
+       s->rx_fifo_errors = hwstats->rxfifooverflow;
+
+       s->tx_packets = hwstats->txpacketcount_gb;
+       s->tx_bytes = hwstats->txoctetcount_gb;
+
+       if (lp->mmc_tx_counters_mask & BIT(21))
+               s->tx_errors = hwstats->txpacketcount_gb -
+                       hwstats->txpacketcount_g;
+       else
+               s->tx_errors = hwstats->txunderflowerror +
+                       hwstats->txcarriererror;
+
+       return s;
+}
+
+static int
+dwceqos_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       struct phy_device *phydev = lp->phy_dev;
+
+       if (!phydev)
+               return -ENODEV;
+
+       return phy_ethtool_gset(phydev, ecmd);
+}
+
+static int
+dwceqos_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       struct phy_device *phydev = lp->phy_dev;
+
+       if (!phydev)
+               return -ENODEV;
+
+       return phy_ethtool_sset(phydev, ecmd);
+}
+
+static void
+dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed)
+{
+       const struct net_local *lp = netdev_priv(ndev);
+
+       strlcpy(ed->driver, lp->pdev->dev.driver->name, sizeof(ed->driver));
+       strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
+}
+
+static void dwceqos_get_pauseparam(struct net_device *ndev,
+                                  struct ethtool_pauseparam *pp)
+{
+       const struct net_local *lp = netdev_priv(ndev);
+
+       pp->autoneg = lp->flowcontrol.autoneg;
+       pp->tx_pause = lp->flowcontrol.tx;
+       pp->rx_pause = lp->flowcontrol.rx;
+}
+
+static int dwceqos_set_pauseparam(struct net_device *ndev,
+                                 struct ethtool_pauseparam *pp)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       int ret = 0;
+
+       lp->flowcontrol.autoneg = pp->autoneg;
+       if (pp->autoneg) {
+               lp->phy_dev->advertising |= ADVERTISED_Pause;
+               lp->phy_dev->advertising |= ADVERTISED_Asym_Pause;
+       } else {
+               lp->phy_dev->advertising &= ~ADVERTISED_Pause;
+               lp->phy_dev->advertising &= ~ADVERTISED_Asym_Pause;
+               lp->flowcontrol.rx = pp->rx_pause;
+               lp->flowcontrol.tx = pp->tx_pause;
+       }
+
+       if (netif_running(ndev))
+               ret = phy_start_aneg(lp->phy_dev);
+
+       return ret;
+}
+
+static void dwceqos_get_strings(struct net_device *ndev, u32 stringset,
+                               u8 *data)
+{
+       size_t i;
+
+       if (stringset != ETH_SS_STATS)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
+               memcpy(data, dwceqos_ethtool_stats[i].stat_name,
+                      ETH_GSTRING_LEN);
+               data += ETH_GSTRING_LEN;
+       }
+}
+
+static void dwceqos_get_ethtool_stats(struct net_device *ndev,
+                                     struct ethtool_stats *stats, u64 *data)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       unsigned long flags;
+       size_t i;
+       u8 *mmcstat = (u8 *)&lp->mmc_counters;
+
+       spin_lock_irqsave(&lp->stats_lock, flags);
+       dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
+                                 lp->mmc_tx_counters_mask);
+       spin_unlock_irqrestore(&lp->stats_lock, flags);
+
+       for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
+               memcpy(data,
+                      mmcstat + dwceqos_ethtool_stats[i].offset,
+                      sizeof(u64));
+               data++;
+       }
+}
+
+static int dwceqos_get_sset_count(struct net_device *ndev, int sset)
+{
+       if (sset == ETH_SS_STATS)
+               return ARRAY_SIZE(dwceqos_ethtool_stats);
+
+       return -EOPNOTSUPP;
+}
+
+static void dwceqos_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+                            void *space)
+{
+       const struct net_local *lp = netdev_priv(dev);
+       u32 *reg_space = (u32 *)space;
+       int reg_offset;
+       int reg_ix = 0;
+
+       /* MAC registers */
+       for (reg_offset = START_MAC_REG_OFFSET;
+               reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
+               reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+               reg_ix++;
+       }
+       /* MTL registers */
+       for (reg_offset = START_MTL_REG_OFFSET;
+               reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) {
+               reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+               reg_ix++;
+       }
+
+       /* DMA registers */
+       for (reg_offset = START_DMA_REG_OFFSET;
+               reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
+               reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+               reg_ix++;
+       }
+
+       BUG_ON(4 * reg_ix > REG_SPACE_SIZE);
+}
+
+static int dwceqos_get_regs_len(struct net_device *dev)
+{
+       return REG_SPACE_SIZE;
+}
+
+static inline const char *dwceqos_get_rx_lpi_state(u32 lpi_ctrl)
+{
+       return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST) ? "on" : "off";
+}
+
+static inline const char *dwceqos_get_tx_lpi_state(u32 lpi_ctrl)
+{
+       return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST) ? "on" : "off";
+}
+
+static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       u32 lpi_status;
+       u32 lpi_enabled;
+
+       if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
+               return -EOPNOTSUPP;
+
+       edata->eee_active  = lp->eee_active;
+       edata->eee_enabled = lp->eee_enabled;
+       edata->tx_lpi_timer = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER);
+       lpi_status = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+       lpi_enabled = !!(lpi_status & DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA);
+       edata->tx_lpi_enabled = lpi_enabled;
+
+       if (netif_msg_hw(lp)) {
+               u32 regval;
+
+               regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+
+               netdev_info(lp->ndev, "MAC LPI State: RX:%s TX:%s\n",
+                           dwceqos_get_rx_lpi_state(regval),
+                           dwceqos_get_tx_lpi_state(regval));
+       }
+
+       return phy_ethtool_get_eee(lp->phy_dev, edata);
+}
+
+static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       u32 regval;
+       unsigned long flags;
+
+       if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
+               return -EOPNOTSUPP;
+
+       if (edata->eee_enabled && !lp->eee_active)
+               return -EOPNOTSUPP;
+
+       if (edata->tx_lpi_enabled) {
+               if (edata->tx_lpi_timer < DWCEQOS_LPI_TIMER_MIN ||
+                   edata->tx_lpi_timer > DWCEQOS_LPI_TIMER_MAX)
+                       return -EINVAL;
+       }
+
+       lp->eee_enabled = edata->eee_enabled;
+
+       if (edata->eee_enabled && edata->tx_lpi_enabled) {
+               dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER,
+                             edata->tx_lpi_timer);
+
+               spin_lock_irqsave(&lp->hw_lock, flags);
+               regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+               regval |= DWCEQOS_LPI_CTRL_ENABLE_EEE;
+               if (lp->en_tx_lpi_clockgating)
+                       regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE;
+               dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+               spin_unlock_irqrestore(&lp->hw_lock, flags);
+       } else {
+               spin_lock_irqsave(&lp->hw_lock, flags);
+               regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+               regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
+               dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+               spin_unlock_irqrestore(&lp->hw_lock, flags);
+       }
+
+       return phy_ethtool_set_eee(lp->phy_dev, edata);
+}
+
+static u32 dwceqos_get_msglevel(struct net_device *ndev)
+{
+       const struct net_local *lp = netdev_priv(ndev);
+
+       return lp->msg_enable;
+}
+
+static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel)
+{
+       struct net_local *lp = netdev_priv(ndev);
+
+       lp->msg_enable = msglevel;
+}
+
+static struct ethtool_ops dwceqos_ethtool_ops = {
+       .get_settings   = dwceqos_get_settings,
+       .set_settings   = dwceqos_set_settings,
+       .get_drvinfo    = dwceqos_get_drvinfo,
+       .get_link       = ethtool_op_get_link,
+       .get_pauseparam = dwceqos_get_pauseparam,
+       .set_pauseparam = dwceqos_set_pauseparam,
+       .get_strings    = dwceqos_get_strings,
+       .get_ethtool_stats = dwceqos_get_ethtool_stats,
+       .get_sset_count = dwceqos_get_sset_count,
+       .get_regs       = dwceqos_get_regs,
+       .get_regs_len   = dwceqos_get_regs_len,
+       .get_eee        = dwceqos_get_eee,
+       .set_eee        = dwceqos_set_eee,
+       .get_msglevel   = dwceqos_get_msglevel,
+       .set_msglevel   = dwceqos_set_msglevel,
+};
+
+static struct net_device_ops netdev_ops = {
+       .ndo_open               = dwceqos_open,
+       .ndo_stop               = dwceqos_stop,
+       .ndo_start_xmit         = dwceqos_start_xmit,
+       .ndo_set_rx_mode        = dwceqos_set_rx_mode,
+       .ndo_set_mac_address    = dwceqos_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = dwceqos_poll_controller,
+#endif
+       .ndo_do_ioctl           = dwceqos_ioctl,
+       .ndo_tx_timeout         = dwceqos_tx_timeout,
+       .ndo_get_stats64        = dwceqos_get_stats64,
+};
+
+static const struct of_device_id dwceq_of_match[] = {
+       { .compatible = "snps,dwc-qos-ethernet-4.10", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, dwceq_of_match);
+
+static int dwceqos_probe(struct platform_device *pdev)
+{
+       struct resource *r_mem = NULL;
+       struct net_device *ndev;
+       struct net_local *lp;
+       int ret = -ENXIO;
+
+       r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!r_mem) {
+               dev_err(&pdev->dev, "no IO resource defined.\n");
+               return -ENXIO;
+       }
+
+       ndev = alloc_etherdev(sizeof(*lp));
+       if (!ndev) {
+               dev_err(&pdev->dev, "etherdev allocation failed.\n");
+               return -ENOMEM;
+       }
+
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+
+       lp = netdev_priv(ndev);
+       lp->ndev = ndev;
+       lp->pdev = pdev;
+       lp->msg_enable = netif_msg_init(debug, DWCEQOS_MSG_DEFAULT);
+
+       spin_lock_init(&lp->tx_lock);
+       spin_lock_init(&lp->hw_lock);
+       spin_lock_init(&lp->stats_lock);
+
+       lp->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
+       if (IS_ERR(lp->apb_pclk)) {
+               dev_err(&pdev->dev, "apb_pclk clock not found.\n");
+               ret = PTR_ERR(lp->apb_pclk);
+               goto err_out_free_netdev;
+       }
+
+       ret = clk_prepare_enable(lp->apb_pclk);
+       if (ret) {
+               dev_err(&pdev->dev, "Unable to enable APER clock.\n");
+               goto err_out_free_netdev;
+       }
+
+       lp->baseaddr = devm_ioremap_resource(&pdev->dev, r_mem);
+       if (IS_ERR(lp->baseaddr)) {
+               dev_err(&pdev->dev, "failed to map baseaddress.\n");
+               ret = PTR_ERR(lp->baseaddr);
+               goto err_out_clk_dis_aper;
+       }
+
+       ndev->irq = platform_get_irq(pdev, 0);
+       ndev->watchdog_timeo = DWCEQOS_TX_TIMEOUT * HZ;
+       ndev->netdev_ops = &netdev_ops;
+       ndev->ethtool_ops = &dwceqos_ethtool_ops;
+       ndev->base_addr = r_mem->start;
+
+       dwceqos_get_hwfeatures(lp);
+       dwceqos_mdio_set_csr(lp);
+
+       ndev->hw_features = NETIF_F_SG;
+
+       if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
+               ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+
+       if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_TXCOESEL)
+               ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+       if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_RXCOESEL)
+               ndev->hw_features |= NETIF_F_RXCSUM;
+
+       ndev->features = ndev->hw_features;
+
+       netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
+
+       ret = register_netdev(ndev);
+       if (ret) {
+               dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+               goto err_out_clk_dis_aper;
+       }
+
+       lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
+       if (IS_ERR(lp->phy_ref_clk)) {
+               dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
+               ret = PTR_ERR(lp->phy_ref_clk);
+               goto err_out_unregister_netdev;
+       }
+
+       ret = clk_prepare_enable(lp->phy_ref_clk);
+       if (ret) {
+               dev_err(&pdev->dev, "Unable to enable device clock.\n");
+               goto err_out_unregister_netdev;
+       }
+
+       lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
+                                               "phy-handle", 0);
+       if (!lp->phy_node && of_phy_is_fixed_link(lp->pdev->dev.of_node)) {
+               ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
+               if (ret < 0) {
+                       dev_err(&pdev->dev, "invalid fixed-link");
+                       goto err_out_unregister_netdev;
+               }
+
+               lp->phy_node = of_node_get(lp->pdev->dev.of_node);
+       }
+
+       ret = of_get_phy_mode(lp->pdev->dev.of_node);
+       if (ret < 0) {
+               dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
+               goto err_out_unregister_clk_notifier;
+       }
+
+       lp->phy_interface = ret;
+
+       ret = dwceqos_mii_init(lp);
+       if (ret) {
+               dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
+               goto err_out_unregister_clk_notifier;
+       }
+
+       ret = dwceqos_mii_probe(ndev);
+       if (ret != 0) {
+               netdev_err(ndev, "mii_probe fail.\n");
+               ret = -ENXIO;
+               goto err_out_unregister_clk_notifier;
+       }
+
+       dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+
+       tasklet_init(&lp->tx_bdreclaim_tasklet, dwceqos_tx_reclaim,
+                    (unsigned long)ndev);
+       tasklet_disable(&lp->tx_bdreclaim_tasklet);
+
+       lp->txtimeout_handler_wq = create_singlethread_workqueue(DRIVER_NAME);
+       INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout);
+
+       platform_set_drvdata(pdev, ndev);
+       ret = dwceqos_probe_config_dt(pdev);
+       if (ret) {
+               dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
+                       ret);
+               goto err_out_unregister_clk_notifier;
+       }
+       dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
+                pdev->id, ndev->base_addr, ndev->irq);
+
+       ret = devm_request_irq(&pdev->dev, ndev->irq, &dwceqos_interrupt, 0,
+                              ndev->name, ndev);
+       if (ret) {
+               dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
+                       ndev->irq, ret);
+               goto err_out_unregister_clk_notifier;
+       }
+
+       if (netif_msg_probe(lp))
+               netdev_dbg(ndev, "net_local@%p\n", lp);
+
+       return 0;
+
+err_out_unregister_clk_notifier:
+       clk_disable_unprepare(lp->phy_ref_clk);
+err_out_unregister_netdev:
+       unregister_netdev(ndev);
+err_out_clk_dis_aper:
+       clk_disable_unprepare(lp->apb_pclk);
+err_out_free_netdev:
+       if (lp->phy_node)
+               of_node_put(lp->phy_node);
+       free_netdev(ndev);
+       platform_set_drvdata(pdev, NULL);
+       return ret;
+}
+
+static int dwceqos_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct net_local *lp;
+
+       if (ndev) {
+               lp = netdev_priv(ndev);
+
+               if (lp->phy_dev)
+                       phy_disconnect(lp->phy_dev);
+               mdiobus_unregister(lp->mii_bus);
+               kfree(lp->mii_bus->irq);
+               mdiobus_free(lp->mii_bus);
+
+               unregister_netdev(ndev);
+
+               clk_disable_unprepare(lp->phy_ref_clk);
+               clk_disable_unprepare(lp->apb_pclk);
+
+               free_netdev(ndev);
+       }
+
+       return 0;
+}
+
+static struct platform_driver dwceqos_driver = {
+       .probe   = dwceqos_probe,
+       .remove  = dwceqos_remove,
+       .driver  = {
+               .name  = DRIVER_NAME,
+               .of_match_table = dwceq_of_match,
+       },
+};
+
+module_platform_driver(dwceqos_driver);
+
+MODULE_DESCRIPTION("DWC Ethernet QoS v4.10a driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andreas Irestaal <andreas.irestal@axis.com>");
+MODULE_AUTHOR("Lars Persson <lars.persson@axis.com>");
index f335bf119ab57d3e209a81e28cc7cbce5f9208c7..d155bf2573cd0ef00abc133074831cccc7f937b1 100644 (file)
@@ -793,9 +793,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
 static int cpsw_poll(struct napi_struct *napi, int budget)
 {
        struct cpsw_priv        *priv = napi_to_priv(napi);
-       int                     num_tx, num_rx;
-
-       num_tx = cpdma_chan_process(priv->txch, 128);
+       int                     num_rx;
 
        num_rx = cpdma_chan_process(priv->rxch, budget);
        if (num_rx < budget) {
@@ -810,9 +808,8 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
                }
        }
 
-       if (num_rx || num_tx)
-               cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
-                        num_rx, num_tx);
+       if (num_rx)
+               cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx);
 
        return num_rx;
 }
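The hunk above narrows cpsw_poll() to RX work only while keeping the standard NAPI contract: consume at most 'budget' packets, and complete NAPI (re-arming the interrupt) only when less than the budget was used. A hedged sketch of that contract; process_rx() and reenable_rx_irq() are placeholders, not cpsw functions:

#include <linux/netdevice.h>

static int process_rx(struct napi_struct *napi, int budget);	/* placeholder */
static void reenable_rx_irq(struct napi_struct *napi);		/* placeholder */

static int example_poll(struct napi_struct *napi, int budget)
{
	int num_rx = process_rx(napi, budget);

	if (num_rx < budget) {
		/* all pending work done: leave polled mode, unmask the IRQ */
		napi_complete(napi);
		reenable_rx_irq(napi);
	}
	/* returning 'budget' keeps the device on the poll list */
	return num_rx;
}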
index bbacf5cccec2fcbc3831e8f61b559d51a5211816..a8a730641bbb14e723f841b2b3c13454b53a5491 100644 (file)
@@ -223,6 +223,7 @@ void *netcp_device_find_module(struct netcp_device *netcp_device,
 
 /* SGMII functions */
 int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port);
+bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set);
 int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port);
 int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface);
 
index 5ec4ed3f6c8def7a6a6cf527cd9ac7ca73c09844..29ae672917b7b3d8c9324b6a77b64193e903b14f 100644 (file)
@@ -51,6 +51,8 @@
                    NETIF_MSG_PKTDATA   | NETIF_MSG_TX_QUEUED   |       \
                    NETIF_MSG_RX_STATUS)
 
+#define NETCP_EFUSE_ADDR_SWAP  2
+
 #define knav_queue_get_id(q)   knav_queue_device_control(q, \
                                KNAV_QUEUE_GET_ID, (unsigned long)NULL)
 
@@ -172,13 +174,22 @@ static void set_words(u32 *words, int num_words, u32 *desc)
 }
 
 /* Read the e-fuse value as 32 bit values to be endian independent */
-static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac)
+static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
 {
        unsigned int addr0, addr1;
 
        addr1 = readl(efuse_mac + 4);
        addr0 = readl(efuse_mac);
 
+       switch (swap) {
+       case NETCP_EFUSE_ADDR_SWAP:
+               addr0 = addr1;
+               addr1 = readl(efuse_mac);
+               break;
+       default:
+               break;
+       }
+
        x[0] = (addr1 & 0x0000ff00) >> 8;
        x[1] = addr1 & 0x000000ff;
        x[2] = (addr0 & 0xff000000) >> 24;
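The swap case above re-reads the low e-fuse word so that addr0 ends up holding the high word and vice versa; the net effect is a plain word exchange (assuming the register contents are stable between reads). An illustrative stand-alone version, extrapolating the remaining byte lanes from the pattern shown:

#include <stdint.h>

static void mac_from_efuse_words(uint8_t mac[6], uint32_t addr0,
				 uint32_t addr1, int swapped)
{
	if (swapped) {			/* the NETCP_EFUSE_ADDR_SWAP case */
		uint32_t tmp = addr0;

		addr0 = addr1;
		addr1 = tmp;
	}
	mac[0] = (addr1 >> 8) & 0xff;
	mac[1] = addr1 & 0xff;
	mac[2] = (addr0 >> 24) & 0xff;
	mac[3] = (addr0 >> 16) & 0xff;
	mac[4] = (addr0 >> 8) & 0xff;
	mac[5] = addr0 & 0xff;
}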
@@ -1617,11 +1628,11 @@ static int netcp_ndo_open(struct net_device *ndev)
        }
        mutex_unlock(&netcp_modules_lock);
 
-       netcp_rxpool_refill(netcp);
        napi_enable(&netcp->rx_napi);
        napi_enable(&netcp->tx_napi);
        knav_queue_enable_notify(netcp->tx_compl_q);
        knav_queue_enable_notify(netcp->rx_queue);
+       netcp_rxpool_refill(netcp);
        netif_tx_wake_all_queues(ndev);
        dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
        return 0;
@@ -1902,7 +1913,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
                        goto quit;
                }
 
-               emac_arch_get_mac_addr(efuse_mac_addr, efuse);
+               emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
                if (is_valid_ether_addr(efuse_mac_addr))
                        ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
                else
@@ -2112,6 +2123,7 @@ probe_quit:
 static int netcp_remove(struct platform_device *pdev)
 {
        struct netcp_device *netcp_device = platform_get_drvdata(pdev);
+       struct netcp_intf *netcp_intf, *netcp_tmp;
        struct netcp_inst_modpriv *inst_modpriv, *tmp;
        struct netcp_module *module;
 
@@ -2123,10 +2135,17 @@ static int netcp_remove(struct platform_device *pdev)
                list_del(&inst_modpriv->inst_list);
                kfree(inst_modpriv);
        }
-       WARN(!list_empty(&netcp_device->interface_head), "%s interface list not empty!\n",
-            pdev->name);
 
-       devm_kfree(&pdev->dev, netcp_device);
+       /* now that all modules are removed, clean up the interfaces */
+       list_for_each_entry_safe(netcp_intf, netcp_tmp,
+                                &netcp_device->interface_head,
+                                interface_list) {
+               netcp_delete_interface(netcp_device, netcp_intf->ndev);
+       }
+
+       WARN(!list_empty(&netcp_device->interface_head),
+            "%s interface list not empty!\n", pdev->name);
+
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        platform_set_drvdata(pdev, NULL);
@@ -2142,7 +2161,6 @@ MODULE_DEVICE_TABLE(of, of_match);
 static struct platform_driver netcp_driver = {
        .driver = {
                .name           = "netcp-1.0",
-               .owner          = THIS_MODULE,
                .of_match_table = of_match,
        },
        .probe = netcp_probe,
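netcp_remove() above walks the interface list with list_for_each_entry_safe() because each iteration deletes the current node. A minimal sketch of the idiom; struct item and destroy_item() are illustrative stand-ins:

#include <linux/list.h>

struct item {
	struct list_head node;
};

static void destroy_item(struct item *it);	/* placeholder */

static void teardown_all(struct list_head *head)
{
	struct item *it, *tmp;

	/* the _safe variant caches the next pointer, so list_del() is legal */
	list_for_each_entry_safe(it, tmp, head, node) {
		list_del(&it->node);
		destroy_item(it);
	}
}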
index 9b7e0a34c98b10aca5eed610c47f33b2eedbbd00..6f16d6aaf7b76cdf5da3445b1a7c639f04e46200 100644 (file)
@@ -295,8 +295,6 @@ struct xgbe_hw_stats {
        u32     rx_dma_overruns;
 };
 
-#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32))
-
 struct gbenu_ss_regs {
        u32     id_ver;
        u32     synce_count;            /* NU */
@@ -480,7 +478,6 @@ struct gbenu_hw_stats {
        u32     tx_pri7_drop_bcnt;
 };
 
-#define GBENU_NUM_HW_STAT_ENTRIES (sizeof(struct gbenu_hw_stats) / sizeof(u32))
 #define GBENU_HW_STATS_REG_MAP_SZ      0x200
 
 struct gbe_ss_regs {
@@ -615,7 +612,6 @@ struct gbe_hw_stats {
        u32     rx_dma_overruns;
 };
 
-#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32))
 #define GBE_MAX_HW_STAT_MODS                   9
 #define GBE_HW_STATS_REG_MAP_SZ                        0x100
 
@@ -646,6 +642,7 @@ struct gbe_priv {
        bool                            enable_ale;
        u8                              max_num_slaves;
        u8                              max_num_ports; /* max_num_slaves + 1 */
+       u8                              num_stats_mods;
        struct netcp_tx_pipe            tx_pipe;
 
        int                             host_port;
@@ -675,6 +672,7 @@ struct gbe_priv {
        struct net_device               *dummy_ndev;
 
        u64                             *hw_stats;
+       u32                             *hw_stats_prev;
        const struct netcp_ethtool_stat *et_stats;
        int                             num_et_stats;
        /*  Lock for updating the hwstats */
@@ -874,7 +872,7 @@ static const struct netcp_ethtool_stat gbe13_et_stats[] = {
 };
 
 /* This is the size of entries in GBENU_STATS_HOST */
-#define GBENU_ET_STATS_HOST_SIZE       33
+#define GBENU_ET_STATS_HOST_SIZE       52
 
 #define GBENU_STATS_HOST(field)                                        \
 {                                                              \
@@ -883,8 +881,8 @@ static const struct netcp_ethtool_stat gbe13_et_stats[] = {
        offsetof(struct gbenu_hw_stats, field)                  \
 }
 
-/* This is the size of entries in GBENU_STATS_HOST */
-#define GBENU_ET_STATS_PORT_SIZE       46
+/* This is the size of entries in GBENU_STATS_PORT */
+#define GBENU_ET_STATS_PORT_SIZE       65
 
 #define GBENU_STATS_P1(field)                                  \
 {                                                              \
@@ -976,7 +974,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
        GBENU_STATS_HOST(ale_unknown_mcast_bytes),
        GBENU_STATS_HOST(ale_unknown_bcast),
        GBENU_STATS_HOST(ale_unknown_bcast_bytes),
+       GBENU_STATS_HOST(ale_pol_match),
+       GBENU_STATS_HOST(ale_pol_match_red),
+       GBENU_STATS_HOST(ale_pol_match_yellow),
        GBENU_STATS_HOST(tx_mem_protect_err),
+       GBENU_STATS_HOST(tx_pri0_drop),
+       GBENU_STATS_HOST(tx_pri1_drop),
+       GBENU_STATS_HOST(tx_pri2_drop),
+       GBENU_STATS_HOST(tx_pri3_drop),
+       GBENU_STATS_HOST(tx_pri4_drop),
+       GBENU_STATS_HOST(tx_pri5_drop),
+       GBENU_STATS_HOST(tx_pri6_drop),
+       GBENU_STATS_HOST(tx_pri7_drop),
+       GBENU_STATS_HOST(tx_pri0_drop_bcnt),
+       GBENU_STATS_HOST(tx_pri1_drop_bcnt),
+       GBENU_STATS_HOST(tx_pri2_drop_bcnt),
+       GBENU_STATS_HOST(tx_pri3_drop_bcnt),
+       GBENU_STATS_HOST(tx_pri4_drop_bcnt),
+       GBENU_STATS_HOST(tx_pri5_drop_bcnt),
+       GBENU_STATS_HOST(tx_pri6_drop_bcnt),
+       GBENU_STATS_HOST(tx_pri7_drop_bcnt),
        /* GBENU Module 1 */
        GBENU_STATS_P1(rx_good_frames),
        GBENU_STATS_P1(rx_broadcast_frames),
@@ -1023,7 +1040,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
        GBENU_STATS_P1(ale_unknown_mcast_bytes),
        GBENU_STATS_P1(ale_unknown_bcast),
        GBENU_STATS_P1(ale_unknown_bcast_bytes),
+       GBENU_STATS_P1(ale_pol_match),
+       GBENU_STATS_P1(ale_pol_match_red),
+       GBENU_STATS_P1(ale_pol_match_yellow),
        GBENU_STATS_P1(tx_mem_protect_err),
+       GBENU_STATS_P1(tx_pri0_drop),
+       GBENU_STATS_P1(tx_pri1_drop),
+       GBENU_STATS_P1(tx_pri2_drop),
+       GBENU_STATS_P1(tx_pri3_drop),
+       GBENU_STATS_P1(tx_pri4_drop),
+       GBENU_STATS_P1(tx_pri5_drop),
+       GBENU_STATS_P1(tx_pri6_drop),
+       GBENU_STATS_P1(tx_pri7_drop),
+       GBENU_STATS_P1(tx_pri0_drop_bcnt),
+       GBENU_STATS_P1(tx_pri1_drop_bcnt),
+       GBENU_STATS_P1(tx_pri2_drop_bcnt),
+       GBENU_STATS_P1(tx_pri3_drop_bcnt),
+       GBENU_STATS_P1(tx_pri4_drop_bcnt),
+       GBENU_STATS_P1(tx_pri5_drop_bcnt),
+       GBENU_STATS_P1(tx_pri6_drop_bcnt),
+       GBENU_STATS_P1(tx_pri7_drop_bcnt),
        /* GBENU Module 2 */
        GBENU_STATS_P2(rx_good_frames),
        GBENU_STATS_P2(rx_broadcast_frames),
@@ -1070,7 +1106,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
        GBENU_STATS_P2(ale_unknown_mcast_bytes),
        GBENU_STATS_P2(ale_unknown_bcast),
        GBENU_STATS_P2(ale_unknown_bcast_bytes),
+       GBENU_STATS_P2(ale_pol_match),
+       GBENU_STATS_P2(ale_pol_match_red),
+       GBENU_STATS_P2(ale_pol_match_yellow),
        GBENU_STATS_P2(tx_mem_protect_err),
+       GBENU_STATS_P2(tx_pri0_drop),
+       GBENU_STATS_P2(tx_pri1_drop),
+       GBENU_STATS_P2(tx_pri2_drop),
+       GBENU_STATS_P2(tx_pri3_drop),
+       GBENU_STATS_P2(tx_pri4_drop),
+       GBENU_STATS_P2(tx_pri5_drop),
+       GBENU_STATS_P2(tx_pri6_drop),
+       GBENU_STATS_P2(tx_pri7_drop),
+       GBENU_STATS_P2(tx_pri0_drop_bcnt),
+       GBENU_STATS_P2(tx_pri1_drop_bcnt),
+       GBENU_STATS_P2(tx_pri2_drop_bcnt),
+       GBENU_STATS_P2(tx_pri3_drop_bcnt),
+       GBENU_STATS_P2(tx_pri4_drop_bcnt),
+       GBENU_STATS_P2(tx_pri5_drop_bcnt),
+       GBENU_STATS_P2(tx_pri6_drop_bcnt),
+       GBENU_STATS_P2(tx_pri7_drop_bcnt),
        /* GBENU Module 3 */
        GBENU_STATS_P3(rx_good_frames),
        GBENU_STATS_P3(rx_broadcast_frames),
@@ -1117,7 +1172,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
        GBENU_STATS_P3(ale_unknown_mcast_bytes),
        GBENU_STATS_P3(ale_unknown_bcast),
        GBENU_STATS_P3(ale_unknown_bcast_bytes),
+       GBENU_STATS_P3(ale_pol_match),
+       GBENU_STATS_P3(ale_pol_match_red),
+       GBENU_STATS_P3(ale_pol_match_yellow),
        GBENU_STATS_P3(tx_mem_protect_err),
+       GBENU_STATS_P3(tx_pri0_drop),
+       GBENU_STATS_P3(tx_pri1_drop),
+       GBENU_STATS_P3(tx_pri2_drop),
+       GBENU_STATS_P3(tx_pri3_drop),
+       GBENU_STATS_P3(tx_pri4_drop),
+       GBENU_STATS_P3(tx_pri5_drop),
+       GBENU_STATS_P3(tx_pri6_drop),
+       GBENU_STATS_P3(tx_pri7_drop),
+       GBENU_STATS_P3(tx_pri0_drop_bcnt),
+       GBENU_STATS_P3(tx_pri1_drop_bcnt),
+       GBENU_STATS_P3(tx_pri2_drop_bcnt),
+       GBENU_STATS_P3(tx_pri3_drop_bcnt),
+       GBENU_STATS_P3(tx_pri4_drop_bcnt),
+       GBENU_STATS_P3(tx_pri5_drop_bcnt),
+       GBENU_STATS_P3(tx_pri6_drop_bcnt),
+       GBENU_STATS_P3(tx_pri7_drop_bcnt),
        /* GBENU Module 4 */
        GBENU_STATS_P4(rx_good_frames),
        GBENU_STATS_P4(rx_broadcast_frames),
@@ -1164,7 +1238,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
        GBENU_STATS_P4(ale_unknown_mcast_bytes),
        GBENU_STATS_P4(ale_unknown_bcast),
        GBENU_STATS_P4(ale_unknown_bcast_bytes),
+       GBENU_STATS_P4(ale_pol_match),
+       GBENU_STATS_P4(ale_pol_match_red),
+       GBENU_STATS_P4(ale_pol_match_yellow),
        GBENU_STATS_P4(tx_mem_protect_err),
+       GBENU_STATS_P4(tx_pri0_drop),
+       GBENU_STATS_P4(tx_pri1_drop),
+       GBENU_STATS_P4(tx_pri2_drop),
+       GBENU_STATS_P4(tx_pri3_drop),
+       GBENU_STATS_P4(tx_pri4_drop),
+       GBENU_STATS_P4(tx_pri5_drop),
+       GBENU_STATS_P4(tx_pri6_drop),
+       GBENU_STATS_P4(tx_pri7_drop),
+       GBENU_STATS_P4(tx_pri0_drop_bcnt),
+       GBENU_STATS_P4(tx_pri1_drop_bcnt),
+       GBENU_STATS_P4(tx_pri2_drop_bcnt),
+       GBENU_STATS_P4(tx_pri3_drop_bcnt),
+       GBENU_STATS_P4(tx_pri4_drop_bcnt),
+       GBENU_STATS_P4(tx_pri5_drop_bcnt),
+       GBENU_STATS_P4(tx_pri6_drop_bcnt),
+       GBENU_STATS_P4(tx_pri7_drop_bcnt),
        /* GBENU Module 5 */
        GBENU_STATS_P5(rx_good_frames),
        GBENU_STATS_P5(rx_broadcast_frames),
@@ -1211,7 +1304,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
        GBENU_STATS_P5(ale_unknown_mcast_bytes),
        GBENU_STATS_P5(ale_unknown_bcast),
        GBENU_STATS_P5(ale_unknown_bcast_bytes),
+       GBENU_STATS_P5(ale_pol_match),
+       GBENU_STATS_P5(ale_pol_match_red),
+       GBENU_STATS_P5(ale_pol_match_yellow),
        GBENU_STATS_P5(tx_mem_protect_err),
+       GBENU_STATS_P5(tx_pri0_drop),
+       GBENU_STATS_P5(tx_pri1_drop),
+       GBENU_STATS_P5(tx_pri2_drop),
+       GBENU_STATS_P5(tx_pri3_drop),
+       GBENU_STATS_P5(tx_pri4_drop),
+       GBENU_STATS_P5(tx_pri5_drop),
+       GBENU_STATS_P5(tx_pri6_drop),
+       GBENU_STATS_P5(tx_pri7_drop),
+       GBENU_STATS_P5(tx_pri0_drop_bcnt),
+       GBENU_STATS_P5(tx_pri1_drop_bcnt),
+       GBENU_STATS_P5(tx_pri2_drop_bcnt),
+       GBENU_STATS_P5(tx_pri3_drop_bcnt),
+       GBENU_STATS_P5(tx_pri4_drop_bcnt),
+       GBENU_STATS_P5(tx_pri5_drop_bcnt),
+       GBENU_STATS_P5(tx_pri6_drop_bcnt),
+       GBENU_STATS_P5(tx_pri7_drop_bcnt),
        /* GBENU Module 6 */
        GBENU_STATS_P6(rx_good_frames),
        GBENU_STATS_P6(rx_broadcast_frames),
@@ -1258,7 +1370,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
        GBENU_STATS_P6(ale_unknown_mcast_bytes),
        GBENU_STATS_P6(ale_unknown_bcast),
        GBENU_STATS_P6(ale_unknown_bcast_bytes),
+       GBENU_STATS_P6(ale_pol_match),
+       GBENU_STATS_P6(ale_pol_match_red),
+       GBENU_STATS_P6(ale_pol_match_yellow),
        GBENU_STATS_P6(tx_mem_protect_err),
+       GBENU_STATS_P6(tx_pri0_drop),
+       GBENU_STATS_P6(tx_pri1_drop),
+       GBENU_STATS_P6(tx_pri2_drop),
+       GBENU_STATS_P6(tx_pri3_drop),
+       GBENU_STATS_P6(tx_pri4_drop),
+       GBENU_STATS_P6(tx_pri5_drop),
+       GBENU_STATS_P6(tx_pri6_drop),
+       GBENU_STATS_P6(tx_pri7_drop),
+       GBENU_STATS_P6(tx_pri0_drop_bcnt),
+       GBENU_STATS_P6(tx_pri1_drop_bcnt),
+       GBENU_STATS_P6(tx_pri2_drop_bcnt),
+       GBENU_STATS_P6(tx_pri3_drop_bcnt),
+       GBENU_STATS_P6(tx_pri4_drop_bcnt),
+       GBENU_STATS_P6(tx_pri5_drop_bcnt),
+       GBENU_STATS_P6(tx_pri6_drop_bcnt),
+       GBENU_STATS_P6(tx_pri7_drop_bcnt),
        /* GBENU Module 7 */
        GBENU_STATS_P7(rx_good_frames),
        GBENU_STATS_P7(rx_broadcast_frames),
@@ -1305,7 +1436,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
        GBENU_STATS_P7(ale_unknown_mcast_bytes),
        GBENU_STATS_P7(ale_unknown_bcast),
        GBENU_STATS_P7(ale_unknown_bcast_bytes),
+       GBENU_STATS_P7(ale_pol_match),
+       GBENU_STATS_P7(ale_pol_match_red),
+       GBENU_STATS_P7(ale_pol_match_yellow),
        GBENU_STATS_P7(tx_mem_protect_err),
+       GBENU_STATS_P7(tx_pri0_drop),
+       GBENU_STATS_P7(tx_pri1_drop),
+       GBENU_STATS_P7(tx_pri2_drop),
+       GBENU_STATS_P7(tx_pri3_drop),
+       GBENU_STATS_P7(tx_pri4_drop),
+       GBENU_STATS_P7(tx_pri5_drop),
+       GBENU_STATS_P7(tx_pri6_drop),
+       GBENU_STATS_P7(tx_pri7_drop),
+       GBENU_STATS_P7(tx_pri0_drop_bcnt),
+       GBENU_STATS_P7(tx_pri1_drop_bcnt),
+       GBENU_STATS_P7(tx_pri2_drop_bcnt),
+       GBENU_STATS_P7(tx_pri3_drop_bcnt),
+       GBENU_STATS_P7(tx_pri4_drop_bcnt),
+       GBENU_STATS_P7(tx_pri5_drop_bcnt),
+       GBENU_STATS_P7(tx_pri6_drop_bcnt),
+       GBENU_STATS_P7(tx_pri7_drop_bcnt),
        /* GBENU Module 8 */
        GBENU_STATS_P8(rx_good_frames),
        GBENU_STATS_P8(rx_broadcast_frames),
@@ -1352,7 +1502,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
        GBENU_STATS_P8(ale_unknown_mcast_bytes),
        GBENU_STATS_P8(ale_unknown_bcast),
        GBENU_STATS_P8(ale_unknown_bcast_bytes),
+       GBENU_STATS_P8(ale_pol_match),
+       GBENU_STATS_P8(ale_pol_match_red),
+       GBENU_STATS_P8(ale_pol_match_yellow),
        GBENU_STATS_P8(tx_mem_protect_err),
+       GBENU_STATS_P8(tx_pri0_drop),
+       GBENU_STATS_P8(tx_pri1_drop),
+       GBENU_STATS_P8(tx_pri2_drop),
+       GBENU_STATS_P8(tx_pri3_drop),
+       GBENU_STATS_P8(tx_pri4_drop),
+       GBENU_STATS_P8(tx_pri5_drop),
+       GBENU_STATS_P8(tx_pri6_drop),
+       GBENU_STATS_P8(tx_pri7_drop),
+       GBENU_STATS_P8(tx_pri0_drop_bcnt),
+       GBENU_STATS_P8(tx_pri1_drop_bcnt),
+       GBENU_STATS_P8(tx_pri2_drop_bcnt),
+       GBENU_STATS_P8(tx_pri3_drop_bcnt),
+       GBENU_STATS_P8(tx_pri4_drop_bcnt),
+       GBENU_STATS_P8(tx_pri5_drop_bcnt),
+       GBENU_STATS_P8(tx_pri6_drop_bcnt),
+       GBENU_STATS_P8(tx_pri7_drop_bcnt),
 };
 
 #define XGBE_STATS0_INFO(field)                                \
@@ -1554,70 +1723,97 @@ static int keystone_get_sset_count(struct net_device *ndev, int stringset)
        }
 }
 
-static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
+static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
+{
+       void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
+       u32  __iomem *p_stats_entry;
+       int i;
+
+       for (i = 0; i < gbe_dev->num_et_stats; i++) {
+               if (gbe_dev->et_stats[i].type == stats_mod) {
+                       p_stats_entry = base + gbe_dev->et_stats[i].offset;
+                       gbe_dev->hw_stats[i] = 0;
+                       gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
+               }
+       }
+}
+
+static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
+                                            int et_stats_entry)
 {
        void __iomem *base = NULL;
-       u32  __iomem *p;
-       u32 tmp = 0;
+       u32  __iomem *p_stats_entry;
+       u32 curr, delta;
+
+       /* The hw_stats_regs pointers are already set up to point
+        * at the stats base for this entry's module type:
+        */
+       base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
+       p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
+       curr = readl(p_stats_entry);
+       delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
+       gbe_dev->hw_stats_prev[et_stats_entry] = curr;
+       gbe_dev->hw_stats[et_stats_entry] += delta;
+}
+
+static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
+{
        int i;
 
        for (i = 0; i < gbe_dev->num_et_stats; i++) {
-               base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type];
-               p = base + gbe_dev->et_stats[i].offset;
-               tmp = readl(p);
-               gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp;
+               gbe_update_hw_stats_entry(gbe_dev, i);
+
                if (data)
                        data[i] = gbe_dev->hw_stats[i];
-               /* write-to-decrement:
-                * new register value = old register value - write value
-                */
-               writel(tmp, p);
        }
 }
 
-static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
+static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
+                                              int stats_mod)
 {
-       void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0];
-       void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1];
-       u64 *hw_stats = &gbe_dev->hw_stats[0];
-       void __iomem *base = NULL;
-       u32  __iomem *p;
-       u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2);
-       int i, j, pair;
+       u32 val;
 
-       for (pair = 0; pair < 2; pair++) {
-               val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+       val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
 
-               if (pair == 0)
-                       val &= ~GBE_STATS_CD_SEL;
-               else
-                       val |= GBE_STATS_CD_SEL;
+       switch (stats_mod) {
+       case GBE_STATSA_MODULE:
+       case GBE_STATSB_MODULE:
+               val &= ~GBE_STATS_CD_SEL;
+               break;
+       case GBE_STATSC_MODULE:
+       case GBE_STATSD_MODULE:
+               val |= GBE_STATS_CD_SEL;
+               break;
+       default:
+               return;
+       }
 
-               /* make the stat modules visible */
-               writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+       /* make the stat module visible */
+       writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+}
 
-               for (i = 0; i < pair_size; i++) {
-                       j = pair * pair_size + i;
-                       switch (gbe_dev->et_stats[j].type) {
-                       case GBE_STATSA_MODULE:
-                       case GBE_STATSC_MODULE:
-                               base = gbe_statsa;
-                       break;
-                       case GBE_STATSB_MODULE:
-                       case GBE_STATSD_MODULE:
-                               base  = gbe_statsb;
-                       break;
-                       }
+static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
+{
+       gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
+       gbe_reset_mod_stats(gbe_dev, stats_mod);
+}
+
+static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
+{
+       u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
+       int et_entry, j, pair;
+
+       for (pair = 0; pair < 2; pair++) {
+               gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
+                                                     GBE_STATSC_MODULE :
+                                                     GBE_STATSA_MODULE));
+
+               for (j = 0; j < half_num_et_stats; j++) {
+                       et_entry = pair * half_num_et_stats + j;
+                       gbe_update_hw_stats_entry(gbe_dev, et_entry);
 
-                       p = base + gbe_dev->et_stats[j].offset;
-                       tmp = readl(p);
-                       hw_stats[j] += tmp;
                        if (data)
-                               data[j] = hw_stats[j];
-                       /* write-to-decrement:
-                        * new register value = old register value - write value
-                        */
-                       writel(tmp, p);
+                               data[et_entry] = gbe_dev->hw_stats[et_entry];
                }
        }
 }
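The rework above replaces the old write-to-decrement readout with snapshot-and-delta accumulation: each 32-bit hardware counter is sampled, the unsigned difference from the previous sample (hw_stats_prev) is added to a 64-bit running total, and the hardware register is left untouched. A minimal sketch of the arithmetic, which stays correct across a single 32-bit wraparound; the function name is illustrative:

#include <stdint.h>

static void accumulate(uint64_t *total, uint32_t *prev, uint32_t curr)
{
	uint32_t delta = curr - *prev;	/* modulo-2^32: survives one wrap */

	*prev = curr;
	*total += delta;
}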
@@ -1901,11 +2097,28 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
        writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
 }
 
+static void gbe_sgmii_rtreset(struct gbe_priv *priv,
+                             struct gbe_slave *slave, bool set)
+{
+       void __iomem *sgmii_port_regs;
+
+       if (SLAVE_LINK_IS_XGMII(slave))
+               return;
+
+       if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
+               sgmii_port_regs = priv->sgmii_port34_regs;
+       else
+               sgmii_port_regs = priv->sgmii_port_regs;
+
+       netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set);
+}
+
 static void gbe_slave_stop(struct gbe_intf *intf)
 {
        struct gbe_priv *gbe_dev = intf->gbe_dev;
        struct gbe_slave *slave = intf->slave;
 
+       gbe_sgmii_rtreset(gbe_dev, slave, true);
        gbe_port_reset(slave);
        /* Disable forwarding */
        cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
@@ -1947,6 +2160,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
 
        gbe_sgmii_config(priv, slave);
        gbe_port_reset(slave);
+       gbe_sgmii_rtreset(priv, slave, false);
        gbe_port_config(priv, slave, priv->rx_packet_max);
        gbe_set_slave_mac(slave, gbe_intf);
        /* enable forwarding */
@@ -2189,14 +2403,15 @@ static void netcp_ethss_timer(unsigned long arg)
                netcp_ethss_update_link_state(gbe_dev, slave, NULL);
        }
 
-       spin_lock_bh(&gbe_dev->hw_stats_lock);
+       /* Timer callbacks already run in BH context, so the plain lock suffices */
+       spin_lock(&gbe_dev->hw_stats_lock);
 
        if (gbe_dev->ss_version == GBE_SS_VERSION_14)
                gbe_update_stats_ver14(gbe_dev, NULL);
        else
                gbe_update_stats(gbe_dev, NULL);
 
-       spin_unlock_bh(&gbe_dev->hw_stats_lock);
+       spin_unlock(&gbe_dev->hw_stats_lock);
 
        gbe_dev->timer.expires  = jiffies + GBE_TIMER_INTERVAL;
        add_timer(&gbe_dev->timer);
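The locking change above relies on context: netcp_ethss_timer() runs as a timer callback, i.e. already in softirq (BH) context, so the plain spin_lock variant is enough there, while process-context users of the same lock keep the _bh variant. A hedged sketch of the pairing; struct shared_stats is illustrative:

#include <linux/spinlock.h>

struct shared_stats {
	spinlock_t lock;
	/* ... counters ... */
};

/* timer callback: already in BH context, plain lock is sufficient */
static void stats_timer_cb(unsigned long arg)
{
	struct shared_stats *s = (struct shared_stats *)arg;

	spin_lock(&s->lock);
	/* ... refresh counters ... */
	spin_unlock(&s->lock);
}

/* process context: disable BHs so the timer cannot interleave */
static void stats_read(struct shared_stats *s)
{
	spin_lock_bh(&s->lock);
	/* ... copy counters out ... */
	spin_unlock_bh(&s->lock);
}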
@@ -2490,10 +2705,9 @@ static void free_secondary_ports(struct gbe_priv *gbe_dev)
 {
        struct gbe_slave *slave;
 
-       for (;;) {
+       while (!list_empty(&gbe_dev->secondary_slaves)) {
                slave = first_sec_slave(gbe_dev);
-               if (!slave)
-                       break;
+
                if (slave->phy)
                        phy_disconnect(slave->phy);
                list_del(&slave->slave_list);
@@ -2554,15 +2768,28 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
        }
        gbe_dev->xgbe_serdes_regs = regs;
 
+       gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
+       gbe_dev->et_stats = xgbe10_et_stats;
+       gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
+
        gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
-                                 XGBE10_NUM_STAT_ENTRIES *
-                                 (gbe_dev->max_num_ports) * sizeof(u64),
-                                 GFP_KERNEL);
+                                        gbe_dev->num_et_stats * sizeof(u64),
+                                        GFP_KERNEL);
        if (!gbe_dev->hw_stats) {
                dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
                return -ENOMEM;
        }
 
+       gbe_dev->hw_stats_prev =
+               devm_kzalloc(gbe_dev->dev,
+                            gbe_dev->num_et_stats * sizeof(u32),
+                            GFP_KERNEL);
+       if (!gbe_dev->hw_stats_prev) {
+               dev_err(gbe_dev->dev,
+                       "hw_stats_prev memory allocation failed\n");
+               return -ENOMEM;
+       }
+
        gbe_dev->ss_version = XGBE_SS_VERSION_10;
        gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
                                        XGBE10_SGMII_MODULE_OFFSET;
@@ -2576,8 +2803,6 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
        gbe_dev->ale_ports = gbe_dev->max_num_ports;
        gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
        gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
-       gbe_dev->et_stats = xgbe10_et_stats;
-       gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
        gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
 
        /* Subsystem registers */
@@ -2662,30 +2887,45 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
        }
        gbe_dev->switch_regs = regs;
 
+       gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
+       gbe_dev->et_stats = gbe13_et_stats;
+       gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
+
        gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
-                                         GBE13_NUM_HW_STAT_ENTRIES *
-                                         gbe_dev->max_num_slaves * sizeof(u64),
-                                         GFP_KERNEL);
+                                        gbe_dev->num_et_stats * sizeof(u64),
+                                        GFP_KERNEL);
        if (!gbe_dev->hw_stats) {
                dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
                return -ENOMEM;
        }
 
+       gbe_dev->hw_stats_prev =
+               devm_kzalloc(gbe_dev->dev,
+                            gbe_dev->num_et_stats * sizeof(u32),
+                            GFP_KERNEL);
+       if (!gbe_dev->hw_stats_prev) {
+               dev_err(gbe_dev->dev,
+                       "hw_stats_prev memory allocation failed\n");
+               return -ENOMEM;
+       }
+
        gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
        gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
 
+       /* K2HK has only 2 hw stats modules visible at a time, so
+        * modules 0 & 2 point to one base and
+        * modules 1 & 3 point to the other base.
+        */
        for (i = 0; i < gbe_dev->max_num_slaves; i++) {
                gbe_dev->hw_stats_regs[i] =
                        gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
-                       (GBE_HW_STATS_REG_MAP_SZ * i);
+                       (GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
        }
 
        gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
        gbe_dev->ale_ports = gbe_dev->max_num_ports;
        gbe_dev->host_port = GBE13_HOST_PORT_NUM;
        gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
-       gbe_dev->et_stats = gbe13_et_stats;
-       gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
        gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
 
        /* Subsystem registers */
@@ -2712,15 +2952,34 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
        void __iomem *regs;
        int i, ret;
 
+       gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
+       gbe_dev->et_stats = gbenu_et_stats;
+
+       if (IS_SS_ID_NU(gbe_dev))
+               gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+                       (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
+       else
+               gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+                                       GBENU_ET_STATS_PORT_SIZE;
+
        gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
-                                 GBENU_NUM_HW_STAT_ENTRIES *
-                                 (gbe_dev->max_num_ports) * sizeof(u64),
-                                 GFP_KERNEL);
+                                        gbe_dev->num_et_stats * sizeof(u64),
+                                        GFP_KERNEL);
        if (!gbe_dev->hw_stats) {
                dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
                return -ENOMEM;
        }
 
+       gbe_dev->hw_stats_prev =
+               devm_kzalloc(gbe_dev->dev,
+                            gbe_dev->num_et_stats * sizeof(u32),
+                            GFP_KERNEL);
+       if (!gbe_dev->hw_stats_prev) {
+               dev_err(gbe_dev->dev,
+                       "hw_stats_prev memory allocation failed\n");
+               return -ENOMEM;
+       }
+
        ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
        if (ret) {
                dev_err(gbe_dev->dev,
@@ -2748,16 +3007,8 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
        gbe_dev->ale_ports = gbe_dev->max_num_ports;
        gbe_dev->host_port = GBENU_HOST_PORT_NUM;
        gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
-       gbe_dev->et_stats = gbenu_et_stats;
        gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
 
-       if (IS_SS_ID_NU(gbe_dev))
-               gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
-                       (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
-       else
-               gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
-                                       GBENU_ET_STATS_PORT_SIZE;
-
        /* Subsystem registers */
        GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
 
@@ -2787,7 +3038,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
        struct cpsw_ale_params ale_params;
        struct gbe_priv *gbe_dev;
        u32 slave_num;
-       int ret = 0;
+       int i, ret = 0;
 
        if (!node) {
                dev_err(dev, "device tree info unavailable\n");
@@ -2839,14 +3090,13 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
                                      &gbe_dev->dma_chan_name);
        if (ret < 0) {
                dev_err(dev, "missing \"tx-channel\" parameter\n");
-               ret = -ENODEV;
-               goto quit;
+               return -EINVAL;
        }
 
        if (!strcmp(node->name, "gbe")) {
                ret = get_gbe_resource_version(gbe_dev, node);
                if (ret)
-                       goto quit;
+                       return ret;
 
                dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
 
@@ -2857,22 +3107,20 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
                else
                        ret = -ENODEV;
 
-               if (ret)
-                       goto quit;
        } else if (!strcmp(node->name, "xgbe")) {
                ret = set_xgbe_ethss10_priv(gbe_dev, node);
                if (ret)
-                       goto quit;
+                       return ret;
                ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
                                             gbe_dev->ss_regs);
-               if (ret)
-                       goto quit;
        } else {
                dev_err(dev, "unknown GBE node(%s)\n", node->name);
                ret = -ENODEV;
-               goto quit;
        }
 
+       if (ret)
+               return ret;
+
        interfaces = of_get_child_by_name(node, "interfaces");
        if (!interfaces)
                dev_err(dev, "could not find interfaces\n");
@@ -2880,11 +3128,11 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
        ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
                                gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
        if (ret)
-               goto quit;
+               return ret;
 
        ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
        if (ret)
-               goto quit;
+               return ret;
 
        /* Create network interfaces */
        INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
@@ -2899,6 +3147,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
                if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
                        break;
        }
+       of_node_put(interfaces);
 
        if (!gbe_dev->num_slaves)
                dev_warn(dev, "No network interface configured\n");
@@ -2911,9 +3160,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
        of_node_put(secondary_ports);
 
        if (!gbe_dev->num_slaves) {
-               dev_err(dev, "No network interface or secondary ports configured\n");
+               dev_err(dev,
+                       "No network interface or secondary ports configured\n");
                ret = -ENODEV;
-               goto quit;
+               goto free_sec_ports;
        }
 
        memset(&ale_params, 0, sizeof(ale_params));
@@ -2927,7 +3177,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
        if (!gbe_dev->ale) {
                dev_err(gbe_dev->dev, "error initializing ale engine\n");
                ret = -ENODEV;
-               goto quit;
+               goto free_sec_ports;
        } else {
                dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
        }
@@ -2935,6 +3185,15 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
        /* initialize host port */
        gbe_init_host_port(gbe_dev);
 
+       spin_lock_bh(&gbe_dev->hw_stats_lock);
+       for (i = 0; i < gbe_dev->num_stats_mods; i++) {
+               if (gbe_dev->ss_version == GBE_SS_VERSION_14)
+                       gbe_reset_mod_stats_ver14(gbe_dev, i);
+               else
+                       gbe_reset_mod_stats(gbe_dev, i);
+       }
+       spin_unlock_bh(&gbe_dev->hw_stats_lock);
+
        init_timer(&gbe_dev->timer);
        gbe_dev->timer.data      = (unsigned long)gbe_dev;
        gbe_dev->timer.function = netcp_ethss_timer;
@@ -2943,14 +3202,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
        *inst_priv = gbe_dev;
        return 0;
 
-quit:
-       if (gbe_dev->hw_stats)
-               devm_kfree(dev, gbe_dev->hw_stats);
-       cpsw_ale_destroy(gbe_dev->ale);
-       if (gbe_dev->ss_regs)
-               devm_iounmap(dev, gbe_dev->ss_regs);
-       of_node_put(interfaces);
-       devm_kfree(dev, gbe_dev);
+free_sec_ports:
+       free_secondary_ports(gbe_dev);
        return ret;
 }
 
@@ -3023,12 +3276,9 @@ static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
        free_secondary_ports(gbe_dev);
 
        if (!list_empty(&gbe_dev->gbe_intf_head))
-               dev_alert(gbe_dev->dev, "unreleased ethss interfaces present\n");
+               dev_alert(gbe_dev->dev,
+                         "unreleased ethss interfaces present\n");
 
-       devm_kfree(gbe_dev->dev, gbe_dev->hw_stats);
-       devm_iounmap(gbe_dev->dev, gbe_dev->ss_regs);
-       memset(gbe_dev, 0x00, sizeof(*gbe_dev));
-       devm_kfree(gbe_dev->dev, gbe_dev);
        return 0;
 }
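The error-path simplification in gbe_probe() above leans on devres: devm_kzalloc()/devm_ioremap() allocations are owned by the device and released automatically when probe fails, so only non-devm state (the secondary ports) needs explicit unwinding. A hedged sketch of the pattern; setup_ports()/teardown_ports() are placeholders:

#include <linux/device.h>
#include <linux/slab.h>

static int setup_ports(void *st);	/* placeholder */
static void teardown_ports(void *st);	/* placeholder */

static int example_probe(struct device *dev)
{
	void *st = devm_kzalloc(dev, 128, GFP_KERNEL);

	if (!st)
		return -ENOMEM;		/* nothing to unwind */

	if (setup_ports(st) < 0) {
		teardown_ports(st);	/* only non-devm state unwound */
		return -ENODEV;		/* 'st' itself is freed by devres */
	}
	return 0;
}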
 
index dbeb14266e2fb106c8a16b7d32b73fbc28095ec7..5d8419f658d04397a7be30c2a1b9a249f492e6e8 100644 (file)
@@ -18,6 +18,9 @@
 
 #include "netcp.h"
 
+#define SGMII_SRESET_RESET             BIT(0)
+#define SGMII_SRESET_RTRESET           BIT(1)
+
 #define SGMII_REG_STATUS_LOCK          BIT(4)
 #define        SGMII_REG_STATUS_LINK           BIT(0)
 #define SGMII_REG_STATUS_AUTONEG       BIT(2)
@@ -51,12 +54,35 @@ static void sgmii_write_reg_bit(void __iomem *base, int reg, u32 val)
 int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port)
 {
        /* Soft reset */
-       sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port), 0x1);
-       while (sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) != 0x0)
+       sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port),
+                           SGMII_SRESET_RESET);
+
+       while ((sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) &
+               SGMII_SRESET_RESET) != 0x0)
                ;
+
        return 0;
 }
 
+/* port is 0 based */
+bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set)
+{
+       u32 reg;
+       bool oldval;
+
+       /* Read, then set or clear, the port's RT soft-reset bit */
+       reg = sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port));
+       oldval = (reg & SGMII_SRESET_RTRESET) != 0x0;
+       if (set)
+               reg |= SGMII_SRESET_RTRESET;
+       else
+               reg &= ~SGMII_SRESET_RTRESET;
+       sgmii_write_reg(sgmii_ofs, SGMII_SRESET_REG(port), reg);
+       wmb();
+
+       return oldval;
+}
+
 int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port)
 {
        u32 status = 0, link = 0;
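netcp_sgmii_rtreset() above is used by the ethss code (gbe_slave_stop()/gbe_slave_open()) to hold a port in RT reset across reconfiguration. A sketch of that pairing, assuming netcp.h is in scope; the reconfiguration details are elided:

static void example_port_reset(void __iomem *sgmii_regs, int port)
{
	/* assert RT reset, keeping the port quiescent */
	netcp_sgmii_rtreset(sgmii_regs, port, true);
	/* ... reset and reconfigure the port here ... */
	netcp_sgmii_rtreset(sgmii_regs, port, false);	/* release */
}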
index dd4544085db321d2f9020d97ebbbb9a8887ed4b9..5ce7020ca53004b602df31a11954f1fb6c7e7e23 100644 (file)
@@ -541,6 +541,29 @@ union nvsp_2_message_uber {
        struct nvsp_2_free_rxbuf free_rxbuf;
 } __packed;
 
+struct nvsp_4_send_vf_association {
+       /* 1: allocated, serial number is valid. 0: not allocated */
+       u32 allocated;
+
+       /* Serial number of the VF to team with */
+       u32 serial;
+} __packed;
+
+enum nvsp_vm_datapath {
+       NVSP_DATAPATH_SYNTHETIC = 0,
+       NVSP_DATAPATH_VF,
+       NVSP_DATAPATH_MAX
+};
+
+struct nvsp_4_sw_datapath {
+       u32 active_datapath; /* active data path in VM */
+} __packed;
+
+union nvsp_4_message_uber {
+       struct nvsp_4_send_vf_association vf_assoc;
+       struct nvsp_4_sw_datapath active_dp;
+} __packed;
+
 enum nvsp_subchannel_operation {
        NVSP_SUBCHANNEL_NONE = 0,
        NVSP_SUBCHANNEL_ALLOCATE,
@@ -578,6 +601,7 @@ union nvsp_all_messages {
        union nvsp_message_init_uber init_msg;
        union nvsp_1_message_uber v1_msg;
        union nvsp_2_message_uber v2_msg;
+       union nvsp_4_message_uber v4_msg;
        union nvsp_5_message_uber v5_msg;
 } __packed;
 
@@ -589,6 +613,7 @@ struct nvsp_message {
 
 
 #define NETVSC_MTU 65536
+#define NETVSC_MTU_MIN 68
 
 #define NETVSC_RECEIVE_BUFFER_SIZE             (1024*1024*16)  /* 16MB */
 #define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY      (1024*1024*15)  /* 15MB */
@@ -670,6 +695,8 @@ struct netvsc_device {
        u32 send_table[VRSS_SEND_TAB_SIZE];
        u32 max_chn;
        u32 num_chn;
+       spinlock_t sc_lock; /* Protects num_sc_offered variable */
+       u32 num_sc_offered;
        atomic_t queue_sends[NR_CPUS];
 
        /* Holds rndis device info */
@@ -688,6 +715,11 @@ struct netvsc_device {
 
        /* The net device context */
        struct net_device_context *nd_ctx;
+
+       /* 1: allocated, serial number is valid. 0: not allocated */
+       u32 vf_alloc;
+       /* Serial number of the VF to team with */
+       u32 vf_serial;
 };
 
 /* NdisInitialize message */
index 23126a74f3577b4263a8136f41548c396c7d2d58..51e4c0fd0a7480c4c705a79a9cf8ec2f627862f9 100644 (file)
@@ -453,13 +453,16 @@ static int negotiate_nvsp_ver(struct hv_device *device,
        if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
                return 0;
 
-       /* NVSPv2 only: Send NDIS config */
+       /* NVSPv2 or later: Send NDIS config */
        memset(init_packet, 0, sizeof(struct nvsp_message));
        init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
        init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu +
                                                       ETH_HLEN;
        init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
 
+       if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5)
+               init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
+
        ret = vmbus_sendpacket(device->channel, init_packet,
                                sizeof(struct nvsp_message),
                                (unsigned long)init_packet,
@@ -1064,11 +1067,10 @@ static void netvsc_receive(struct netvsc_device *net_device,
 
 
 static void netvsc_send_table(struct hv_device *hdev,
-                             struct vmpacket_descriptor *vmpkt)
+                             struct nvsp_message *nvmsg)
 {
        struct netvsc_device *nvscdev;
        struct net_device *ndev;
-       struct nvsp_message *nvmsg;
        int i;
        u32 count, *tab;
 
@@ -1077,12 +1079,6 @@ static void netvsc_send_table(struct hv_device *hdev,
                return;
        ndev = nvscdev->ndev;
 
-       nvmsg = (struct nvsp_message *)((unsigned long)vmpkt +
-                                       (vmpkt->offset8 << 3));
-
-       if (nvmsg->hdr.msg_type != NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE)
-               return;
-
        count = nvmsg->msg.v5_msg.send_table.count;
        if (count != VRSS_SEND_TAB_SIZE) {
                netdev_err(ndev, "Received wrong send-table size:%u\n", count);
@@ -1096,6 +1092,28 @@ static void netvsc_send_table(struct hv_device *hdev,
                nvscdev->send_table[i] = tab[i];
 }
 
+static void netvsc_send_vf(struct netvsc_device *nvdev,
+                          struct nvsp_message *nvmsg)
+{
+       nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
+       nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+}
+
+static inline void netvsc_receive_inband(struct hv_device *hdev,
+                                        struct netvsc_device *nvdev,
+                                        struct nvsp_message *nvmsg)
+{
+       switch (nvmsg->hdr.msg_type) {
+       case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
+               netvsc_send_table(hdev, nvmsg);
+               break;
+
+       case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
+               netvsc_send_vf(nvdev, nvmsg);
+               break;
+       }
+}
+
 void netvsc_channel_cb(void *context)
 {
        int ret;
@@ -1108,6 +1126,7 @@ void netvsc_channel_cb(void *context)
        unsigned char *buffer;
        int bufferlen = NETVSC_PACKET_SIZE;
        struct net_device *ndev;
+       struct nvsp_message *nvmsg;
 
        if (channel->primary_channel != NULL)
                device = channel->primary_channel->device_obj;
@@ -1126,6 +1145,8 @@ void netvsc_channel_cb(void *context)
                if (ret == 0) {
                        if (bytes_recvd > 0) {
                                desc = (struct vmpacket_descriptor *)buffer;
+                               nvmsg = (struct nvsp_message *)((unsigned long)
+                                        desc + (desc->offset8 << 3));
                                switch (desc->type) {
                                case VM_PKT_COMP:
                                        netvsc_send_completion(net_device,
@@ -1138,7 +1159,9 @@ void netvsc_channel_cb(void *context)
                                        break;
 
                                case VM_PKT_DATA_INBAND:
-                                       netvsc_send_table(device, desc);
+                                       netvsc_receive_inband(device,
+                                                             net_device,
+                                                             nvmsg);
                                        break;
 
                                default:
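
The nvmsg computation introduced above locates the message body inside a received VMBus packet: offset8 counts 8-byte units from the start of the descriptor, so the payload begins (offset8 << 3) bytes in. A standalone model of that arithmetic; the descriptor struct is a simplified stand-in, not the real VMBus layout:

#include <stdint.h>
#include <stdio.h>

struct desc_model {
        uint16_t type;
        uint16_t offset8;               /* payload offset in 8-byte units */
        uint16_t len8;                  /* total length in 8-byte units */
        uint16_t flags;
};

int main(void)
{
        struct desc_model desc = { .offset8 = 2 };
        unsigned long base = 0x1000;    /* pretend descriptor address */
        unsigned long payload = base + (desc.offset8 << 3);

        printf("payload at +%d bytes -> 0x%lx\n", desc.offset8 << 3, payload);
        return 0;
}
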
index 358475ed9b5964c53f038c61f7fb8a3996c2a5ab..7b36d5fecc1f24b95c87477ebd43a06ec5a4f914 100644 (file)
@@ -106,7 +106,7 @@ static int netvsc_open(struct net_device *net)
                return ret;
        }
 
-       netif_tx_start_all_queues(net);
+       netif_tx_wake_all_queues(net);
 
        nvdev = hv_get_drvdata(device_obj);
        rdev = nvdev->extension;
@@ -120,15 +120,56 @@ static int netvsc_close(struct net_device *net)
 {
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_device *device_obj = net_device_ctx->device_ctx;
+       struct netvsc_device *nvdev = hv_get_drvdata(device_obj);
        int ret;
+       u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
+       struct vmbus_channel *chn;
 
        netif_tx_disable(net);
 
        /* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
        cancel_work_sync(&net_device_ctx->work);
        ret = rndis_filter_close(device_obj);
-       if (ret != 0)
+       if (ret != 0) {
                netdev_err(net, "unable to close device (ret %d).\n", ret);
+               return ret;
+       }
+
+       /* Ensure pending bytes in ring are read */
+       while (true) {
+               aread = 0;
+               for (i = 0; i < nvdev->num_chn; i++) {
+                       chn = nvdev->chn_table[i];
+                       if (!chn)
+                               continue;
+
+                       hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
+                                                    &awrite);
+
+                       if (aread)
+                               break;
+
+                       hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
+                                                    &awrite);
+
+                       if (aread)
+                               break;
+               }
+
+               retry++;
+               if (retry > retry_max || aread == 0)
+                       break;
+
+               msleep(msec);
+
+               if (msec < 1000)
+                       msec *= 2;
+       }
+
+       if (aread) {
+               netdev_err(net, "Ring buffer not empty after closing rndis\n");
+               ret = -ETIMEDOUT;
+       }
 
        return ret;
 }
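
The close path above drains the VMBus rings with a bounded poll: check both rings on every channel, sleep, double the interval (starting at 10 ms, capped at 1 s), and give up after retry_max attempts, reporting a timeout if bytes are still pending. A standalone model of the loop's shape, with a fake ring standing in for hv_get_ringbuffer_availbytes:

#include <stdbool.h>
#include <stdio.h>

static unsigned int bytes_pending(void)
{
        static unsigned int left = 100; /* pretend the host drains the ring */

        return left ? left -= 25 : 0;
}

int main(void)
{
        unsigned int aread, msec = 10, retry = 0, retry_max = 20;

        while (true) {
                aread = bytes_pending();

                retry++;
                if (retry > retry_max || aread == 0)
                        break;

                printf("%u bytes pending, sleeping %u ms\n", aread, msec);
                /* msleep(msec) in the driver; elided here */
                if (msec < 1000)
                        msec *= 2;
        }

        return aread ? 1 : 0;           /* non-zero mirrors the -ETIMEDOUT path */
}
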
@@ -736,6 +777,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
        struct netvsc_device *nvdev = hv_get_drvdata(hdev);
        struct netvsc_device_info device_info;
        int limit = ETH_DATA_LEN;
+       int ret = 0;
 
        if (nvdev == NULL || nvdev->destroy)
                return -ENODEV;
@@ -743,13 +785,14 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
        if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
                limit = NETVSC_MTU - ETH_HLEN;
 
-       /* Hyper-V hosts don't support MTU < ETH_DATA_LEN (1500) */
-       if (mtu < ETH_DATA_LEN || mtu > limit)
+       if (mtu < NETVSC_MTU_MIN || mtu > limit)
                return -EINVAL;
 
+       ret = netvsc_close(ndev);
+       if (ret)
+               goto out;
+
        nvdev->start_remove = true;
-       cancel_work_sync(&ndevctx->work);
-       netif_tx_disable(ndev);
        rndis_filter_device_remove(hdev);
 
        ndev->mtu = mtu;
@@ -759,9 +802,11 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
        device_info.ring_size = ring_size;
        device_info.max_num_vrss_chns = max_num_vrss_chns;
        rndis_filter_device_add(hdev, &device_info);
-       netif_tx_wake_all_queues(ndev);
 
-       return 0;
+out:
+       netvsc_open(ndev);
+
+       return ret;
 }
 
 static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
index 236aeb76ef224ba5eaf9e0994b6e9363434047ac..9b8263db49cc30c8a079cd27d0ba08aa67df34cd 100644 (file)
@@ -984,9 +984,16 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
        struct netvsc_device *nvscdev;
        u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
        int ret;
+       unsigned long flags;
 
        nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj);
 
+       spin_lock_irqsave(&nvscdev->sc_lock, flags);
+       nvscdev->num_sc_offered--;
+       spin_unlock_irqrestore(&nvscdev->sc_lock, flags);
+       if (nvscdev->num_sc_offered == 0)
+               complete(&nvscdev->channel_init_wait);
+
        if (chn_index >= nvscdev->num_chn)
                return;
 
@@ -1015,8 +1022,10 @@ int rndis_filter_device_add(struct hv_device *dev,
        u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
        u32 mtu, size;
        u32 num_rss_qs;
+       u32 sc_delta;
        const struct cpumask *node_cpu_mask;
        u32 num_possible_rss_qs;
+       unsigned long flags;
 
        rndis_device = get_rndis_device();
        if (!rndis_device)
@@ -1039,6 +1048,8 @@ int rndis_filter_device_add(struct hv_device *dev,
        net_device->max_chn = 1;
        net_device->num_chn = 1;
 
+       spin_lock_init(&net_device->sc_lock);
+
        net_device->extension = rndis_device;
        rndis_device->net_dev = net_device;
 
@@ -1054,7 +1065,7 @@ int rndis_filter_device_add(struct hv_device *dev,
        ret = rndis_filter_query_device(rndis_device,
                                        RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
                                        &mtu, &size);
-       if (ret == 0 && size == sizeof(u32))
+       if (ret == 0 && size == sizeof(u32) && mtu < net_device->ndev->mtu)
                net_device->ndev->mtu = mtu;
 
        /* Get the mac address */
@@ -1116,6 +1127,9 @@ int rndis_filter_device_add(struct hv_device *dev,
        num_possible_rss_qs = cpumask_weight(node_cpu_mask);
        net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
 
+       num_rss_qs = net_device->num_chn - 1;
+       net_device->num_sc_offered = num_rss_qs;
+
        if (net_device->num_chn == 1)
                goto out;
 
@@ -1157,11 +1171,25 @@ int rndis_filter_device_add(struct hv_device *dev,
 
        ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn);
 
+       /*
+        * Wait for the host to send us the sub-channel offers.
+        */
+       spin_lock_irqsave(&net_device->sc_lock, flags);
+       sc_delta = num_rss_qs - (net_device->num_chn - 1);
+       net_device->num_sc_offered -= sc_delta;
+       spin_unlock_irqrestore(&net_device->sc_lock, flags);
+
+       while (net_device->num_sc_offered != 0) {
+               t = wait_for_completion_timeout(&net_device->channel_init_wait, 10*HZ);
+               if (t == 0)
+                       WARN(1, "Netvsc: Waiting for sub-channel processing");
+       }
 out:
        if (ret) {
                net_device->max_chn = 1;
                net_device->num_chn = 1;
        }
+
        return 0; /* return 0 because primary channel can be used alone */
 
 err_dev_remv:
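
Taken together, the hunks above implement a countdown: num_sc_offered tracks outstanding sub-channel offers, each netvsc_sc_open decrements it under sc_lock, and the probe path blocks until the count reaches zero. A standalone userspace model of that pattern (build with -pthread); the mutex and condition variable are stand-ins for the kernel's spinlock and completion:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static int num_sc_offered = 3;

static void *sc_open(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        if (--num_sc_offered == 0)
                pthread_cond_signal(&done);     /* complete() equivalent */
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t[3];
        int i;

        for (i = 0; i < 3; i++)
                pthread_create(&t[i], NULL, sc_open, NULL);

        pthread_mutex_lock(&lock);
        while (num_sc_offered != 0)             /* wait_for_completion */
                pthread_cond_wait(&done, &lock);
        pthread_mutex_unlock(&lock);

        for (i = 0; i < 3; i++)
                pthread_join(t[i], NULL);
        printf("all sub-channel offers processed\n");
        return 0;
}
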
index f7bd9f3ddaac8c2044e2ea3215e02b98fdc3587d..d0d5bf6cbb686a357e4e5ed98ad652c0dbf6908a 100644 (file)
@@ -545,7 +545,9 @@ at86rf230_async_state_delay(void *context)
        }
 
        /* Default delay is 1us in most cases */
-       tim = ktime_set(0, NSEC_PER_USEC);
+       udelay(1);
+       at86rf230_async_state_timer(&ctx->timer);
+       return;
 
 change:
        hrtimer_start(&ctx->timer, tim, HRTIMER_MODE_REL);
index b6fc295796679554fc8a9d499eb349dc1bdd662f..613dae559925f947586f8f011aad315d0705866c 100644 (file)
@@ -1151,7 +1151,6 @@ MODULE_DEVICE_TABLE(of, cc2520_of_ids);
 static struct spi_driver cc2520_driver = {
        .driver = {
                .name = "cc2520",
-               .bus = &spi_bus_type,
                .owner = THIS_MODULE,
                .of_match_table = of_match_ptr(cc2520_of_ids),
        },
index 2549760e039fd803fc747f501d74c336fac71b58..997724b8e4343b3ab068a86463393e8008f6ee91 100644 (file)
@@ -812,7 +812,6 @@ MODULE_DEVICE_TABLE(spi, mrf24j40_ids);
 static struct spi_driver mrf24j40_driver = {
        .driver = {
                .name = "mrf24j40",
-               .bus = &spi_bus_type,
                .owner = THIS_MODULE,
        },
        .id_table = mrf24j40_ids,
index 94570aace2414a5984d989045c599ed141582eb5..cc56fac3c3f83ef51e4f42f67f9e1c4e549f7725 100644 (file)
 #include <net/net_namespace.h>
 
 #define TX_Q_LIMIT    32
-struct ifb_private {
+struct ifb_q_private {
+       struct net_device       *dev;
        struct tasklet_struct   ifb_tasklet;
-       int     tasklet_pending;
-
-       struct u64_stats_sync   rsync;
+       int                     tasklet_pending;
+       int                     txqnum;
        struct sk_buff_head     rq;
-       u64 rx_packets;
-       u64 rx_bytes;
+       u64                     rx_packets;
+       u64                     rx_bytes;
+       struct u64_stats_sync   rsync;
 
        struct u64_stats_sync   tsync;
+       u64                     tx_packets;
+       u64                     tx_bytes;
        struct sk_buff_head     tq;
-       u64 tx_packets;
-       u64 tx_bytes;
-};
+} ____cacheline_aligned_in_smp;
 
-static int numifbs = 2;
+struct ifb_dev_private {
+       struct ifb_q_private *tx_private;
+};
 
-static void ri_tasklet(unsigned long dev);
 static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
 static int ifb_open(struct net_device *dev);
 static int ifb_close(struct net_device *dev);
 
-static void ri_tasklet(unsigned long dev)
+static void ifb_ri_tasklet(unsigned long _txp)
 {
-       struct net_device *_dev = (struct net_device *)dev;
-       struct ifb_private *dp = netdev_priv(_dev);
+       struct ifb_q_private *txp = (struct ifb_q_private *)_txp;
        struct netdev_queue *txq;
        struct sk_buff *skb;
 
-       txq = netdev_get_tx_queue(_dev, 0);
-       if ((skb = skb_peek(&dp->tq)) == NULL) {
-               if (__netif_tx_trylock(txq)) {
-                       skb_queue_splice_tail_init(&dp->rq, &dp->tq);
-                       __netif_tx_unlock(txq);
-               } else {
-                       /* reschedule */
+       txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
+       skb = skb_peek(&txp->tq);
+       if (!skb) {
+               if (!__netif_tx_trylock(txq))
                        goto resched;
-               }
+               skb_queue_splice_tail_init(&txp->rq, &txp->tq);
+               __netif_tx_unlock(txq);
        }
 
-       while ((skb = __skb_dequeue(&dp->tq)) != NULL) {
+       while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
                u32 from = G_TC_FROM(skb->tc_verd);
 
                skb->tc_verd = 0;
                skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
 
-               u64_stats_update_begin(&dp->tsync);
-               dp->tx_packets++;
-               dp->tx_bytes += skb->len;
-               u64_stats_update_end(&dp->tsync);
+               u64_stats_update_begin(&txp->tsync);
+               txp->tx_packets++;
+               txp->tx_bytes += skb->len;
+               u64_stats_update_end(&txp->tsync);
 
                rcu_read_lock();
-               skb->dev = dev_get_by_index_rcu(dev_net(_dev), skb->skb_iif);
+               skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
                if (!skb->dev) {
                        rcu_read_unlock();
                        dev_kfree_skb(skb);
-                       _dev->stats.tx_dropped++;
-                       if (skb_queue_len(&dp->tq) != 0)
+                       txp->dev->stats.tx_dropped++;
+                       if (skb_queue_len(&txp->tq) != 0)
                                goto resched;
                        break;
                }
                rcu_read_unlock();
-               skb->skb_iif = _dev->ifindex;
+               skb->skb_iif = txp->dev->ifindex;
 
                if (from & AT_EGRESS) {
                        dev_queue_xmit(skb);
@@ -112,10 +111,11 @@ static void ri_tasklet(unsigned long dev)
        }
 
        if (__netif_tx_trylock(txq)) {
-               if ((skb = skb_peek(&dp->rq)) == NULL) {
-                       dp->tasklet_pending = 0;
-                       if (netif_queue_stopped(_dev))
-                               netif_wake_queue(_dev);
+               skb = skb_peek(&txp->rq);
+               if (!skb) {
+                       txp->tasklet_pending = 0;
+                       if (netif_tx_queue_stopped(txq))
+                               netif_tx_wake_queue(txq);
                } else {
                        __netif_tx_unlock(txq);
                        goto resched;
@@ -123,8 +123,8 @@ static void ri_tasklet(unsigned long dev)
                __netif_tx_unlock(txq);
        } else {
 resched:
-               dp->tasklet_pending = 1;
-               tasklet_schedule(&dp->ifb_tasklet);
+               txp->tasklet_pending = 1;
+               tasklet_schedule(&txp->ifb_tasklet);
        }
 
 }
@@ -132,29 +132,58 @@ resched:
 static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
                                             struct rtnl_link_stats64 *stats)
 {
-       struct ifb_private *dp = netdev_priv(dev);
+       struct ifb_dev_private *dp = netdev_priv(dev);
+       struct ifb_q_private *txp = dp->tx_private;
        unsigned int start;
-
-       do {
-               start = u64_stats_fetch_begin_irq(&dp->rsync);
-               stats->rx_packets = dp->rx_packets;
-               stats->rx_bytes = dp->rx_bytes;
-       } while (u64_stats_fetch_retry_irq(&dp->rsync, start));
-
-       do {
-               start = u64_stats_fetch_begin_irq(&dp->tsync);
-
-               stats->tx_packets = dp->tx_packets;
-               stats->tx_bytes = dp->tx_bytes;
-
-       } while (u64_stats_fetch_retry_irq(&dp->tsync, start));
-
+       u64 packets, bytes;
+       int i;
+
+       for (i = 0; i < dev->num_tx_queues; i++, txp++) {
+               do {
+                       start = u64_stats_fetch_begin_irq(&txp->rsync);
+                       packets = txp->rx_packets;
+                       bytes = txp->rx_bytes;
+               } while (u64_stats_fetch_retry_irq(&txp->rsync, start));
+               stats->rx_packets += packets;
+               stats->rx_bytes += bytes;
+
+               do {
+                       start = u64_stats_fetch_begin_irq(&txp->tsync);
+                       packets = txp->tx_packets;
+                       bytes = txp->tx_bytes;
+               } while (u64_stats_fetch_retry_irq(&txp->tsync, start));
+               stats->tx_packets += packets;
+               stats->tx_bytes += bytes;
+       }
        stats->rx_dropped = dev->stats.rx_dropped;
        stats->tx_dropped = dev->stats.tx_dropped;
 
        return stats;
 }
 
+static int ifb_dev_init(struct net_device *dev)
+{
+       struct ifb_dev_private *dp = netdev_priv(dev);
+       struct ifb_q_private *txp;
+       int i;
+
+       txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
+       if (!txp)
+               return -ENOMEM;
+       dp->tx_private = txp;
+       for (i = 0; i < dev->num_tx_queues; i++, txp++) {
+               txp->txqnum = i;
+               txp->dev = dev;
+               __skb_queue_head_init(&txp->rq);
+               __skb_queue_head_init(&txp->tq);
+               u64_stats_init(&txp->rsync);
+               u64_stats_init(&txp->tsync);
+               tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet,
+                            (unsigned long)txp);
+               netif_tx_start_queue(netdev_get_tx_queue(dev, i));
+       }
+       return 0;
+}
 
 static const struct net_device_ops ifb_netdev_ops = {
        .ndo_open       = ifb_open,
@@ -162,6 +191,7 @@ static const struct net_device_ops ifb_netdev_ops = {
        .ndo_get_stats64 = ifb_stats64,
        .ndo_start_xmit = ifb_xmit,
        .ndo_validate_addr = eth_validate_addr,
+       .ndo_init       = ifb_dev_init,
 };
 
 #define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG  | NETIF_F_FRAGLIST | \
@@ -169,10 +199,24 @@ static const struct net_device_ops ifb_netdev_ops = {
                      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX         | \
                      NETIF_F_HW_VLAN_STAG_TX)
 
+static void ifb_dev_free(struct net_device *dev)
+{
+       struct ifb_dev_private *dp = netdev_priv(dev);
+       struct ifb_q_private *txp = dp->tx_private;
+       int i;
+
+       for (i = 0; i < dev->num_tx_queues; i++, txp++) {
+               tasklet_kill(&txp->ifb_tasklet);
+               __skb_queue_purge(&txp->rq);
+               __skb_queue_purge(&txp->tq);
+       }
+       kfree(dp->tx_private);
+       free_netdev(dev);
+}
+
 static void ifb_setup(struct net_device *dev)
 {
        /* Initialize the device structure. */
-       dev->destructor = free_netdev;
        dev->netdev_ops = &ifb_netdev_ops;
 
        /* Fill in device structure with ethernet-generic values. */
@@ -188,17 +232,19 @@ static void ifb_setup(struct net_device *dev)
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        netif_keep_dst(dev);
        eth_hw_addr_random(dev);
+       dev->destructor = ifb_dev_free;
 }
 
 static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-       struct ifb_private *dp = netdev_priv(dev);
+       struct ifb_dev_private *dp = netdev_priv(dev);
        u32 from = G_TC_FROM(skb->tc_verd);
+       struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);
 
-       u64_stats_update_begin(&dp->rsync);
-       dp->rx_packets++;
-       dp->rx_bytes += skb->len;
-       u64_stats_update_end(&dp->rsync);
+       u64_stats_update_begin(&txp->rsync);
+       txp->rx_packets++;
+       txp->rx_bytes += skb->len;
+       u64_stats_update_end(&txp->rsync);
 
        if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) {
                dev_kfree_skb(skb);
@@ -206,14 +252,13 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_OK;
        }
 
-       if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) {
-               netif_stop_queue(dev);
-       }
+       if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
+               netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));
 
-       __skb_queue_tail(&dp->rq, skb);
-       if (!dp->tasklet_pending) {
-               dp->tasklet_pending = 1;
-               tasklet_schedule(&dp->ifb_tasklet);
+       __skb_queue_tail(&txp->rq, skb);
+       if (!txp->tasklet_pending) {
+               txp->tasklet_pending = 1;
+               tasklet_schedule(&txp->ifb_tasklet);
        }
 
        return NETDEV_TX_OK;
@@ -221,24 +266,13 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
 
 static int ifb_close(struct net_device *dev)
 {
-       struct ifb_private *dp = netdev_priv(dev);
-
-       tasklet_kill(&dp->ifb_tasklet);
-       netif_stop_queue(dev);
-       __skb_queue_purge(&dp->rq);
-       __skb_queue_purge(&dp->tq);
+       netif_tx_stop_all_queues(dev);
        return 0;
 }
 
 static int ifb_open(struct net_device *dev)
 {
-       struct ifb_private *dp = netdev_priv(dev);
-
-       tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
-       __skb_queue_head_init(&dp->rq);
-       __skb_queue_head_init(&dp->tq);
-       netif_start_queue(dev);
-
+       netif_tx_start_all_queues(dev);
        return 0;
 }
 
@@ -255,31 +289,30 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
 
 static struct rtnl_link_ops ifb_link_ops __read_mostly = {
        .kind           = "ifb",
-       .priv_size      = sizeof(struct ifb_private),
+       .priv_size      = sizeof(struct ifb_dev_private),
        .setup          = ifb_setup,
        .validate       = ifb_validate,
 };
 
-/* Number of ifb devices to be set up by this module. */
+/* Number of ifb devices to be set up by this module.
+ * Note that these legacy devices have one queue.
+ * Prefer something like: ip link add ifb10 numtxqueues 8 type ifb
+ */
+static int numifbs = 2;
 module_param(numifbs, int, 0);
 MODULE_PARM_DESC(numifbs, "Number of ifb devices");
 
 static int __init ifb_init_one(int index)
 {
        struct net_device *dev_ifb;
-       struct ifb_private *dp;
        int err;
 
-       dev_ifb = alloc_netdev(sizeof(struct ifb_private), "ifb%d",
+       dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d",
                               NET_NAME_UNKNOWN, ifb_setup);
 
        if (!dev_ifb)
                return -ENOMEM;
 
-       dp = netdev_priv(dev_ifb);
-       u64_stats_init(&dp->rsync);
-       u64_stats_init(&dp->tsync);
-
        dev_ifb->rtnl_link_ops = &ifb_link_ops;
        err = register_netdevice(dev_ifb);
        if (err < 0)
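
The reworked ifb_stats64 above reads each queue's counters inside a u64_stats_fetch_begin/retry loop, re-reading the snapshot whenever a writer bumped the sequence counter mid-copy. A standalone single-threaded model of that retry shape (the writer side is omitted, so the loop runs exactly once; the helpers below only mimic the sequence-counter semantics):

#include <stdio.h>

static unsigned int seq;                /* even = stable, odd = write in progress */
static unsigned long long rx_packets = 42, rx_bytes = 4200;

static unsigned int fetch_begin(void)
{
        return seq;
}

static int fetch_retry(unsigned int start)
{
        return start != seq || (start & 1);
}

int main(void)
{
        unsigned long long packets, bytes;
        unsigned int start;

        do {
                start = fetch_begin();
                packets = rx_packets;   /* copy under the snapshot */
                bytes = rx_bytes;
        } while (fetch_retry(start));   /* retry if a writer interfered */

        printf("%llu packets, %llu bytes\n", packets, bytes);
        return 0;
}
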
index 953a97492fabf46eda9986ad713e4cc9ec4275dd..9542b7bac61afab0f4537d91f8cc004160521f85 100644 (file)
@@ -67,8 +67,6 @@ struct ipvl_dev {
        struct ipvl_port        *port;
        struct net_device       *phy_dev;
        struct list_head        addrs;
-       int                     ipv4cnt;
-       int                     ipv6cnt;
        struct ipvl_pcpu_stats  __percpu *pcpu_stats;
        DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE);
        netdev_features_t       sfeatures;
@@ -106,6 +104,11 @@ static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
        return rcu_dereference(d->rx_handler_data);
 }
 
+static inline struct ipvl_port *ipvlan_port_get_rcu_bh(const struct net_device *d)
+{
+       return rcu_dereference_bh(d->rx_handler_data);
+}
+
 static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d)
 {
        return rtnl_dereference(d->rx_handler_data);
@@ -124,5 +127,5 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
 bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
 struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
                                        const void *iaddr, bool is_v6);
-void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync);
+void ipvlan_ht_addr_del(struct ipvl_addr *addr);
 #endif /* __IPVLAN_H */
index 8afbedad620d9ed27576dc6426878ae858979a51..207f62e8de9a93415cc76eb5fd75f987b3de53b6 100644 (file)
@@ -85,11 +85,9 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
                hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
 }
 
-void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync)
+void ipvlan_ht_addr_del(struct ipvl_addr *addr)
 {
        hlist_del_init_rcu(&addr->hlnode);
-       if (sync)
-               synchronize_rcu();
 }
 
 struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
@@ -531,7 +529,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
 int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ipvl_dev *ipvlan = netdev_priv(dev);
-       struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev);
+       struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);
 
        if (!port)
                goto out;
index 1acc283160d924e0754f3da99fc120c638c257a2..20b58bdecf7540100edc5522e74804e6a7544d95 100644 (file)
@@ -153,10 +153,9 @@ static int ipvlan_open(struct net_device *dev)
        else
                dev->flags &= ~IFF_NOARP;
 
-       if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
-               list_for_each_entry(addr, &ipvlan->addrs, anode)
-                       ipvlan_ht_addr_add(ipvlan, addr);
-       }
+       list_for_each_entry(addr, &ipvlan->addrs, anode)
+               ipvlan_ht_addr_add(ipvlan, addr);
+
        return dev_uc_add(phy_dev, phy_dev->dev_addr);
 }
 
@@ -171,10 +170,9 @@ static int ipvlan_stop(struct net_device *dev)
 
        dev_uc_del(phy_dev, phy_dev->dev_addr);
 
-       if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
-               list_for_each_entry(addr, &ipvlan->addrs, anode)
-                       ipvlan_ht_addr_del(addr, !dev->dismantle);
-       }
+       list_for_each_entry(addr, &ipvlan->addrs, anode)
+               ipvlan_ht_addr_del(addr);
+
        return 0;
 }
 
@@ -471,8 +469,6 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
        ipvlan->port = port;
        ipvlan->sfeatures = IPVLAN_FEATURES;
        INIT_LIST_HEAD(&ipvlan->addrs);
-       ipvlan->ipv4cnt = 0;
-       ipvlan->ipv6cnt = 0;
 
        /* TODO Probably put random address here to be presented to the
         * world but keep using the physical-dev address for the outgoing
@@ -508,12 +504,12 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ipvl_addr *addr, *next;
 
-       if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
-               list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
-                       ipvlan_ht_addr_del(addr, !dev->dismantle);
-                       list_del(&addr->anode);
-               }
+       list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
+               ipvlan_ht_addr_del(addr);
+               list_del(&addr->anode);
+               kfree_rcu(addr, rcu);
        }
+
        list_del_rcu(&ipvlan->pnode);
        unregister_netdevice_queue(dev, head);
        netdev_upper_dev_unlink(ipvlan->phy_dev, dev);
@@ -627,7 +623,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
        memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
        addr->atype = IPVL_IPV6;
        list_add_tail(&addr->anode, &ipvlan->addrs);
-       ipvlan->ipv6cnt++;
+
        /* If the interface is not up, the address will be added to the hash
         * list by ipvlan_open.
         */
@@ -645,10 +641,8 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
        if (!addr)
                return;
 
-       ipvlan_ht_addr_del(addr, true);
+       ipvlan_ht_addr_del(addr);
        list_del(&addr->anode);
-       ipvlan->ipv6cnt--;
-       WARN_ON(ipvlan->ipv6cnt < 0);
        kfree_rcu(addr, rcu);
 
        return;
@@ -661,6 +655,10 @@ static int ipvlan_addr6_event(struct notifier_block *unused,
        struct net_device *dev = (struct net_device *)if6->idev->dev;
        struct ipvl_dev *ipvlan = netdev_priv(dev);
 
+       /* FIXME IPv6 autoconf calls us from bh without RTNL */
+       if (in_softirq())
+               return NOTIFY_DONE;
+
        if (!netif_is_ipvlan(dev))
                return NOTIFY_DONE;
 
@@ -699,7 +697,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
        memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
        addr->atype = IPVL_IPV4;
        list_add_tail(&addr->anode, &ipvlan->addrs);
-       ipvlan->ipv4cnt++;
+
        /* If the interface is not up, the address will be added to the hash
         * list by ipvlan_open.
         */
@@ -717,10 +715,8 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
        if (!addr)
                return;
 
-       ipvlan_ht_addr_del(addr, true);
+       ipvlan_ht_addr_del(addr);
        list_del(&addr->anode);
-       ipvlan->ipv4cnt--;
-       WARN_ON(ipvlan->ipv4cnt < 0);
        kfree_rcu(addr, rcu);
 
        return;
index 9f59f17dc317a254641bdc48973ce78e089761bc..47da43595ac271c570e8536a2e1a3b697a9dbd79 100644 (file)
@@ -1047,6 +1047,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
        .ndo_netpoll_cleanup    = macvlan_dev_netpoll_cleanup,
 #endif
        .ndo_get_iflink         = macvlan_dev_get_iflink,
+       .ndo_features_check     = passthru_features_check,
 };
 
 void macvlan_common_setup(struct net_device *dev)
index 3b933bb5a8d5084208c2a9903ab44317cae5d146..edd77342773a8d4ef0713717adfa5bcf6bdf44f7 100644 (file)
@@ -719,6 +719,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
        struct virtio_net_hdr vnet_hdr = { 0 };
        int vnet_hdr_len = 0;
        int copylen = 0;
+       int depth;
        bool zerocopy = false;
        size_t linear;
        ssize_t n;
@@ -804,6 +805,12 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 
        skb_probe_transport_header(skb, ETH_HLEN);
 
+       /* Move network header to the right position for VLAN tagged packets */
+       if ((skb->protocol == htons(ETH_P_8021Q) ||
+            skb->protocol == htons(ETH_P_8021AD)) &&
+           __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
+               skb_set_network_header(skb, depth);
+
        rcu_read_lock();
        vlan = rcu_dereference(q->vlan);
        /* copy skb_ubuf_info for callback when skb has no error */
index cb86d7a0154228f5c3711898072a0e5f5d4d6a81..c07030dbe7484b50e1f49c9dff6a35ba6cf0fc95 100644 (file)
@@ -14,6 +14,11 @@ if PHYLIB
 
 comment "MII PHY device drivers"
 
+config AQUANTIA_PHY
+        tristate "Drivers for the Aquantia PHYs"
+        ---help---
+          Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405
+
 config AT803X_PHY
        tristate "Drivers for Atheros AT803X PHYs"
        ---help---
@@ -54,6 +59,11 @@ config VITESSE_PHY
         ---help---
           Currently supports the vsc8244
 
+config TERANETICS_PHY
+        tristate "Drivers for the Teranetics PHYs"
+        ---help---
+          Currently supports the Teranetics TN2020
+
 config SMSC_PHY
        tristate "Drivers for SMSC PHYs"
        ---help---
@@ -145,13 +155,13 @@ config MDIO_GPIO
          will be called mdio-gpio.
 
 config MDIO_OCTEON
-       tristate "Support for MDIO buses on Octeon SOCs"
-       depends on CAVIUM_OCTEON_SOC
-       default y
+       tristate "Support for MDIO buses on Octeon and ThunderX SOCs"
+       depends on 64BIT
        help
 
-         This module provides a driver for the Octeon MDIO busses.
-         It is required by the Octeon Ethernet device drivers.
+         This module provides a driver for the Octeon and ThunderX MDIO
+         busses. It is required by the Octeon and ThunderX ethernet device
+         drivers.
 
          If in doubt, say Y.
 
index fcc25a0c45cd01de449f677bb424cbe158a416c7..9bb103358c74d2c87054c08f0c35d1d83609a43a 100644 (file)
@@ -3,12 +3,14 @@
 libphy-objs                    := phy.o phy_device.o mdio_bus.o
 
 obj-$(CONFIG_PHYLIB)           += libphy.o
+obj-$(CONFIG_AQUANTIA_PHY)     += aquantia.o
 obj-$(CONFIG_MARVELL_PHY)      += marvell.o
 obj-$(CONFIG_DAVICOM_PHY)      += davicom.o
 obj-$(CONFIG_CICADA_PHY)       += cicada.o
 obj-$(CONFIG_LXT_PHY)          += lxt.o
 obj-$(CONFIG_QSEMI_PHY)                += qsemi.o
 obj-$(CONFIG_SMSC_PHY)         += smsc.o
+obj-$(CONFIG_TERANETICS_PHY)   += teranetics.o
 obj-$(CONFIG_VITESSE_PHY)      += vitesse.o
 obj-$(CONFIG_BROADCOM_PHY)     += broadcom.o
 obj-$(CONFIG_BCM63XX_PHY)      += bcm63xx.o
diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c
new file mode 100644 (file)
index 0000000..73d347d
--- /dev/null
@@ -0,0 +1,152 @@
+/*
+ * Driver for Aquantia PHY
+ *
+ * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
+ *
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/mdio.h>
+
+#define PHY_ID_AQ1202  0x03a1b445
+#define PHY_ID_AQ2104  0x03a1b460
+#define PHY_ID_AQR105  0x03a1b4a2
+#define PHY_ID_AQR405  0x03a1b4b0
+
+#define PHY_AQUANTIA_FEATURES  (SUPPORTED_10000baseT_Full | \
+                                SUPPORTED_1000baseT_Full | \
+                                SUPPORTED_100baseT_Full | \
+                                PHY_DEFAULT_FEATURES)
+
+static int aquantia_config_aneg(struct phy_device *phydev)
+{
+       phydev->supported = PHY_AQUANTIA_FEATURES;
+       phydev->advertising = phydev->supported;
+
+       return 0;
+}
+
+static int aquantia_aneg_done(struct phy_device *phydev)
+{
+       int reg;
+
+       reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+       return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE);
+}
+
+static int aquantia_read_status(struct phy_device *phydev)
+{
+       int reg;
+
+       reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+       reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+       if (reg & MDIO_STAT1_LSTATUS)
+               phydev->link = 1;
+       else
+               phydev->link = 0;
+
+       reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
+       mdelay(10);
+       reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
+
+       switch (reg) {
+       case 0x9:
+               phydev->speed = SPEED_2500;
+               break;
+       case 0x5:
+               phydev->speed = SPEED_1000;
+               break;
+       case 0x3:
+               phydev->speed = SPEED_100;
+               break;
+       case 0x7:
+       default:
+               phydev->speed = SPEED_10000;
+               break;
+       }
+       phydev->duplex = DUPLEX_FULL;
+
+       return 0;
+}
+
+static struct phy_driver aquantia_driver[] = {
+{
+       .phy_id         = PHY_ID_AQ1202,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "Aquantia AQ1202",
+       .features       = PHY_AQUANTIA_FEATURES,
+       .aneg_done      = aquantia_aneg_done,
+       .config_aneg    = aquantia_config_aneg,
+       .read_status    = aquantia_read_status,
+       .driver         = { .owner = THIS_MODULE,},
+},
+{
+       .phy_id         = PHY_ID_AQ2104,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "Aquantia AQ2104",
+       .features       = PHY_AQUANTIA_FEATURES,
+       .aneg_done      = aquantia_aneg_done,
+       .config_aneg    = aquantia_config_aneg,
+       .read_status    = aquantia_read_status,
+       .driver         = { .owner = THIS_MODULE,},
+},
+{
+       .phy_id         = PHY_ID_AQR105,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "Aquantia AQR105",
+       .features       = PHY_AQUANTIA_FEATURES,
+       .aneg_done      = aquantia_aneg_done,
+       .config_aneg    = aquantia_config_aneg,
+       .read_status    = aquantia_read_status,
+       .driver         = { .owner = THIS_MODULE,},
+},
+{
+       .phy_id         = PHY_ID_AQR405,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "Aquantia AQR405",
+       .features       = PHY_AQUANTIA_FEATURES,
+       .aneg_done      = aquantia_aneg_done,
+       .config_aneg    = aquantia_config_aneg,
+       .read_status    = aquantia_read_status,
+       .driver         = { .owner = THIS_MODULE,},
+},
+};
+
+static int __init aquantia_init(void)
+{
+       return phy_drivers_register(aquantia_driver,
+                                   ARRAY_SIZE(aquantia_driver));
+}
+
+static void __exit aquantia_exit(void)
+{
+       return phy_drivers_unregister(aquantia_driver,
+                                     ARRAY_SIZE(aquantia_driver));
+}
+
+module_init(aquantia_init);
+module_exit(aquantia_exit);
+
+static struct mdio_device_id __maybe_unused aquantia_tbl[] = {
+       { PHY_ID_AQ1202, 0xfffffff0 },
+       { PHY_ID_AQ2104, 0xfffffff0 },
+       { PHY_ID_AQR105, 0xfffffff0 },
+       { PHY_ID_AQR405, 0xfffffff0 },
+       { }
+};
+
+MODULE_DEVICE_TABLE(mdio, aquantia_tbl);
+
+MODULE_DESCRIPTION("Aquantia PHY driver");
+MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
+MODULE_LICENSE("GPL v2");
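
aquantia_read_status above reads MDIO_STAT1 twice before trusting the link bit. The IEEE link-status bit is latching-low: once the link has dropped, the bit stays 0 until the register is read, so the first read flushes the stale latched value and the second reflects the live state. A standalone model of that register behavior (the latch simulation is illustrative):

#include <stdio.h>

static int latched;                     /* 0 = a past link drop is latched */
static int live = 1;                    /* current link state */

static int read_stat1(void)
{
        int val = latched;

        latched = live;                 /* reading clears the latch */
        return val;
}

int main(void)
{
        int first = read_stat1();       /* may still report the old drop */
        int second = read_stat1();      /* reflects the current state */

        printf("first read: link %d, second read: link %d\n", first, second);
        return 0;
}
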
index 00cb41e713123689803e5dddfa527c3ebaee26ae..185b03c08e16ce9140b2b0d91abdfacd5b9258a1 100644 (file)
@@ -1449,17 +1449,9 @@ static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info)
        info->rx_filters =
                (1 << HWTSTAMP_FILTER_NONE) |
                (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
-               (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-               (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
                (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
-               (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-               (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
                (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
-               (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-               (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-               (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
-               (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-               (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+               (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
        return 0;
 }
 
index c7a12e2e07b7670a55a682ea2988cc4cca86d7b7..32f10662f4ac7c189e3fa666f0891f515d2a4ca1 100644 (file)
@@ -123,12 +123,8 @@ static int dp83867_of_init(struct phy_device *phydev)
        if (ret)
                return ret;
 
-       ret = of_property_read_u32(of_node, "ti,fifo-depth",
+       return of_property_read_u32(of_node, "ti,fifo-depth",
                                   &dp83867->fifo_depth);
-       if (ret)
-               return ret;
-
-       return 0;
 }
 #else
 static int dp83867_of_init(struct phy_device *phydev)
@@ -164,7 +160,7 @@ static int dp83867_config_init(struct phy_device *phydev)
                        return ret;
        }
 
-       if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) ||
+       if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
            (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) {
                val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL,
                                            DP83867_DEVADDR, phydev->addr);
index 1960b46add65b3b89f122cc401c872050cebdbe4..479b93f9581c4088e784340220a4b61837e9dbde 100644 (file)
@@ -52,6 +52,10 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
        u16 lpagb = 0;
        u16 lpa = 0;
 
+       if (!fp->status.link)
+               goto done;
+       bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
+
        if (fp->status.duplex) {
                bmcr |= BMCR_FULLDPLX;
 
@@ -96,15 +100,13 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
                }
        }
 
-       if (fp->status.link)
-               bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
-
        if (fp->status.pause)
                lpa |= LPA_PAUSE_CAP;
 
        if (fp->status.asym_pause)
                lpa |= LPA_PAUSE_ASYM;
 
+done:
        fp->regs[MII_PHYSID1] = 0;
        fp->regs[MII_PHYSID2] = 0;
 
index f721444c2b0a9413dd0bac8c7e61099cf6d3789b..3320a179ee360c6b3e8d90b6355f5cd18c8b4630 100644 (file)
@@ -48,6 +48,8 @@
 #define MII_M1011_IMASK_CLEAR          0x0000
 
 #define MII_M1011_PHY_SCR              0x10
+#define MII_M1011_PHY_SCR_MDI          0x0000
+#define MII_M1011_PHY_SCR_MDI_X                0x0020
 #define MII_M1011_PHY_SCR_AUTO_CROSS   0x0060
 
 #define MII_M1145_PHY_EXT_SR           0x1b
@@ -159,6 +161,43 @@ static int marvell_config_intr(struct phy_device *phydev)
        return err;
 }
 
+static int marvell_set_polarity(struct phy_device *phydev, int polarity)
+{
+       int reg;
+       int err;
+       int val;
+
+       /* get the current settings */
+       reg = phy_read(phydev, MII_M1011_PHY_SCR);
+       if (reg < 0)
+               return reg;
+
+       val = reg;
+       val &= ~MII_M1011_PHY_SCR_AUTO_CROSS;
+       switch (polarity) {
+       case ETH_TP_MDI:
+               val |= MII_M1011_PHY_SCR_MDI;
+               break;
+       case ETH_TP_MDI_X:
+               val |= MII_M1011_PHY_SCR_MDI_X;
+               break;
+       case ETH_TP_MDI_AUTO:
+       case ETH_TP_MDI_INVALID:
+       default:
+               val |= MII_M1011_PHY_SCR_AUTO_CROSS;
+               break;
+       }
+
+       if (val != reg) {
+               /* Set the new polarity value in the register */
+               err = phy_write(phydev, MII_M1011_PHY_SCR, val);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
 static int marvell_config_aneg(struct phy_device *phydev)
 {
        int err;
@@ -191,8 +230,7 @@ static int marvell_config_aneg(struct phy_device *phydev)
        if (err < 0)
                return err;
 
-       err = phy_write(phydev, MII_M1011_PHY_SCR,
-                       MII_M1011_PHY_SCR_AUTO_CROSS);
+       err = marvell_set_polarity(phydev, phydev->mdix);
        if (err < 0)
                return err;
 
index c838ad6155f7863cbed177f35c4416fad1c0ddb5..fcf4e4df7cc867c25cdb75a912a41d1a1bf58560 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/platform_device.h>
+#include <linux/of_address.h>
 #include <linux/of_mdio.h>
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/phy.h>
 #include <linux/io.h>
 
+#ifdef CONFIG_CAVIUM_OCTEON_SOC
 #include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-smix-defs.h>
+#endif
 
-#define DRV_VERSION "1.0"
-#define DRV_DESCRIPTION "Cavium Networks Octeon SMI/MDIO driver"
+#define DRV_VERSION "1.1"
+#define DRV_DESCRIPTION "Cavium Networks Octeon/ThunderX SMI/MDIO driver"
 
 #define SMI_CMD                0x0
 #define SMI_WR_DAT     0x8
 #define SMI_CLK                0x18
 #define SMI_EN         0x20
 
+#ifdef __BIG_ENDIAN_BITFIELD
+#define OCT_MDIO_BITFIELD_FIELD(field, more)   \
+       field;                                  \
+       more
+
+#else
+#define OCT_MDIO_BITFIELD_FIELD(field, more)   \
+       more                                    \
+       field;
+
+#endif
+
+union cvmx_smix_clk {
+       u64 u64;
+       struct cvmx_smix_clk_s {
+         OCT_MDIO_BITFIELD_FIELD(u64 reserved_25_63:39,
+         OCT_MDIO_BITFIELD_FIELD(u64 mode:1,
+         OCT_MDIO_BITFIELD_FIELD(u64 reserved_21_23:3,
+         OCT_MDIO_BITFIELD_FIELD(u64 sample_hi:5,
+         OCT_MDIO_BITFIELD_FIELD(u64 sample_mode:1,
+         OCT_MDIO_BITFIELD_FIELD(u64 reserved_14_14:1,
+         OCT_MDIO_BITFIELD_FIELD(u64 clk_idle:1,
+         OCT_MDIO_BITFIELD_FIELD(u64 preamble:1,
+         OCT_MDIO_BITFIELD_FIELD(u64 sample:4,
+         OCT_MDIO_BITFIELD_FIELD(u64 phase:8,
+         ;))))))))))
+       } s;
+};
+
+union cvmx_smix_cmd {
+       u64 u64;
+       struct cvmx_smix_cmd_s {
+         OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+         OCT_MDIO_BITFIELD_FIELD(u64 phy_op:2,
+         OCT_MDIO_BITFIELD_FIELD(u64 reserved_13_15:3,
+         OCT_MDIO_BITFIELD_FIELD(u64 phy_adr:5,
+         OCT_MDIO_BITFIELD_FIELD(u64 reserved_5_7:3,
+         OCT_MDIO_BITFIELD_FIELD(u64 reg_adr:5,
+         ;))))))
+       } s;
+};
+
+union cvmx_smix_en {
+       u64 u64;
+       struct cvmx_smix_en_s {
+         OCT_MDIO_BITFIELD_FIELD(u64 reserved_1_63:63,
+         OCT_MDIO_BITFIELD_FIELD(u64 en:1,
+         ;))
+       } s;
+};
+
+union cvmx_smix_rd_dat {
+       u64 u64;
+       struct cvmx_smix_rd_dat_s {
+         OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+         OCT_MDIO_BITFIELD_FIELD(u64 pending:1,
+         OCT_MDIO_BITFIELD_FIELD(u64 val:1,
+         OCT_MDIO_BITFIELD_FIELD(u64 dat:16,
+         ;))))
+       } s;
+};
+
+union cvmx_smix_wr_dat {
+       u64 u64;
+       struct cvmx_smix_wr_dat_s {
+         OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+         OCT_MDIO_BITFIELD_FIELD(u64 pending:1,
+         OCT_MDIO_BITFIELD_FIELD(u64 val:1,
+         OCT_MDIO_BITFIELD_FIELD(u64 dat:16,
+         ;))))
+       } s;
+};
+
 enum octeon_mdiobus_mode {
        UNINIT = 0,
        C22,
@@ -41,6 +116,21 @@ struct octeon_mdiobus {
        int phy_irq[PHY_MAX_ADDR];
 };
 
+#ifdef CONFIG_CAVIUM_OCTEON_SOC
+static void oct_mdio_writeq(u64 val, u64 addr)
+{
+       cvmx_write_csr(addr, val);
+}
+
+static u64 oct_mdio_readq(u64 addr)
+{
+       return cvmx_read_csr(addr);
+}
+#else
+#define oct_mdio_writeq(val, addr)     writeq_relaxed(val, (void *)addr)
+#define oct_mdio_readq(addr)           readq_relaxed((void *)addr)
+#endif
+
 static void octeon_mdiobus_set_mode(struct octeon_mdiobus *p,
                                    enum octeon_mdiobus_mode m)
 {
@@ -49,10 +139,10 @@ static void octeon_mdiobus_set_mode(struct octeon_mdiobus *p,
        if (m == p->mode)
                return;
 
-       smi_clk.u64 = cvmx_read_csr(p->register_base + SMI_CLK);
+       smi_clk.u64 = oct_mdio_readq(p->register_base + SMI_CLK);
        smi_clk.s.mode = (m == C45) ? 1 : 0;
        smi_clk.s.preamble = 1;
-       cvmx_write_csr(p->register_base + SMI_CLK, smi_clk.u64);
+       oct_mdio_writeq(smi_clk.u64, p->register_base + SMI_CLK);
        p->mode = m;
 }
 
@@ -67,7 +157,7 @@ static int octeon_mdiobus_c45_addr(struct octeon_mdiobus *p,
 
        smi_wr.u64 = 0;
        smi_wr.s.dat = regnum & 0xffff;
-       cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64);
+       oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
 
        regnum = (regnum >> 16) & 0x1f;
 
@@ -75,14 +165,14 @@ static int octeon_mdiobus_c45_addr(struct octeon_mdiobus *p,
        smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_45_ADDRESS */
        smi_cmd.s.phy_adr = phy_id;
        smi_cmd.s.reg_adr = regnum;
-       cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
+       oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
 
        do {
                /* Wait 1000 clocks so we don't saturate the RSL bus
                 * doing reads.
                 */
                __delay(1000);
-               smi_wr.u64 = cvmx_read_csr(p->register_base + SMI_WR_DAT);
+               smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
        } while (smi_wr.s.pending && --timeout);
 
        if (timeout <= 0)
@@ -114,14 +204,14 @@ static int octeon_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum)
        smi_cmd.s.phy_op = op;
        smi_cmd.s.phy_adr = phy_id;
        smi_cmd.s.reg_adr = regnum;
-       cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
+       oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
 
        do {
                /* Wait 1000 clocks so we don't saturate the RSL bus
                 * doing reads.
                 */
                __delay(1000);
-               smi_rd.u64 = cvmx_read_csr(p->register_base + SMI_RD_DAT);
+               smi_rd.u64 = oct_mdio_readq(p->register_base + SMI_RD_DAT);
        } while (smi_rd.s.pending && --timeout);
 
        if (smi_rd.s.val)
@@ -153,20 +243,20 @@ static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id,
 
        smi_wr.u64 = 0;
        smi_wr.s.dat = val;
-       cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64);
+       oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
 
        smi_cmd.u64 = 0;
        smi_cmd.s.phy_op = op;
        smi_cmd.s.phy_adr = phy_id;
        smi_cmd.s.reg_adr = regnum;
-       cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
+       oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
 
        do {
                /* Wait 1000 clocks so we don't saturate the RSL bus
                 * doing reads.
                 */
                __delay(1000);
-               smi_wr.u64 = cvmx_read_csr(p->register_base + SMI_WR_DAT);
+               smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
        } while (smi_wr.s.pending && --timeout);
 
        if (timeout <= 0)
@@ -187,30 +277,34 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
        if (res_mem == NULL) {
                dev_err(&pdev->dev, "found no memory resource\n");
-               err = -ENXIO;
-               goto fail;
+               return -ENXIO;
        }
+
        bus->mdio_phys = res_mem->start;
        bus->regsize = resource_size(res_mem);
+
        if (!devm_request_mem_region(&pdev->dev, bus->mdio_phys, bus->regsize,
                                     res_mem->name)) {
                dev_err(&pdev->dev, "request_mem_region failed\n");
-               goto fail;
+               return -ENXIO;
        }
+
        bus->register_base =
                (u64)devm_ioremap(&pdev->dev, bus->mdio_phys, bus->regsize);
+       if (!bus->register_base) {
+               dev_err(&pdev->dev, "devm_ioremap failed\n");
+               return -ENOMEM;
+       }
 
        bus->mii_bus = mdiobus_alloc();
-
        if (!bus->mii_bus)
                goto fail;
 
        smi_en.u64 = 0;
        smi_en.s.en = 1;
-       cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
+       oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
 
        bus->mii_bus->priv = bus;
        bus->mii_bus->irq = bus->phy_irq;
@@ -234,7 +328,7 @@ fail_register:
        mdiobus_free(bus->mii_bus);
 fail:
        smi_en.u64 = 0;
-       cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
+       oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
        return err;
 }
 
@@ -248,7 +342,7 @@ static int octeon_mdiobus_remove(struct platform_device *pdev)
        mdiobus_unregister(bus->mii_bus);
        mdiobus_free(bus->mii_bus);
        smi_en.u64 = 0;
-       cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
+       oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
        return 0;
 }
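
The OCT_MDIO_BITFIELD_FIELD macro introduced above emits a bitfield list in declaration order on big-endian builds and in reverse order on little-endian ones, so the same struct overlays the 64-bit hardware register correctly either way. A standalone model of the trick (compile with -DMODEL_BIG_ENDIAN to flip the ordering; like the kernel original, the expansion leaves a stray semicolon that gcc accepts):

#include <stdint.h>
#include <stdio.h>

#ifdef MODEL_BIG_ENDIAN
#define BITFIELD_FIELD(field, more) field; more
#else
#define BITFIELD_FIELD(field, more) more field;
#endif

union smix_en_model {
        uint64_t u64;
        struct {
                BITFIELD_FIELD(uint64_t reserved_1_63:63,
                BITFIELD_FIELD(uint64_t en:1,
                ;))
        } s;
};

int main(void)
{
        union smix_en_model r = { .u64 = 0 };

        r.s.en = 1;                     /* lands in the register's low bit on LE */
        printf("raw register: 0x%016llx\n", (unsigned long long)r.u64);
        return 0;
}
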
 
index 095ef3fe369af5ebe08254384abc38176df1aef1..46a14cbb021541095a22016a7ad712d79db9307a 100644 (file)
@@ -421,6 +421,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
 {
        struct phy_device *phydev = to_phy_device(dev);
        struct phy_driver *phydrv = to_phy_driver(drv);
+       const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
+       int i;
 
        if (of_driver_match_device(dev, drv))
                return 1;
@@ -428,8 +430,21 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
        if (phydrv->match_phy_device)
                return phydrv->match_phy_device(phydev);
 
-       return (phydrv->phy_id & phydrv->phy_id_mask) ==
-               (phydev->phy_id & phydrv->phy_id_mask);
+       if (phydev->is_c45) {
+               for (i = 1; i < num_ids; i++) {
+                       if (!(phydev->c45_ids.devices_in_package & (1 << i)))
+                               continue;
+
+                       if ((phydrv->phy_id & phydrv->phy_id_mask) ==
+                           (phydev->c45_ids.device_ids[i] &
+                            phydrv->phy_id_mask))
+                               return 1;
+               }
+               return 0;
+       } else {
+               return (phydrv->phy_id & phydrv->phy_id_mask) ==
+                       (phydev->phy_id & phydrv->phy_id_mask);
+       }
 }
 
 #ifdef CONFIG_PM
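
The clause-45 branch added above matches a driver against any MMD present in the PHY package: devices_in_package flags which of the per-MMD device IDs are valid, and the driver matches if its masked ID equals any valid one. A standalone model of that loop with made-up IDs and mask:

#include <stdint.h>
#include <stdio.h>

#define NUM_IDS 8

int main(void)
{
        uint32_t device_ids[NUM_IDS] = { 0, 0x03a1b4a2, 0, 0x03a1b4a2 };
        uint32_t devices_in_package = (1 << 1) | (1 << 3);
        uint32_t drv_id = 0x03a1b4a0, drv_mask = 0xfffffff0;
        int i, match = 0;

        for (i = 1; i < NUM_IDS; i++) {
                if (!(devices_in_package & (1 << i)))
                        continue;       /* this MMD is absent from the package */
                if ((drv_id & drv_mask) == (device_ids[i] & drv_mask)) {
                        match = 1;
                        break;
                }
        }

        printf("driver %s\n", match ? "matches" : "does not match");
        return 0;
}
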
index b2197b506acbe86f3540d5ae1d8334129c2bbe57..84b1fba58ac3c8efcbbb0bf9311b442ac52614c1 100644 (file)
@@ -353,6 +353,8 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
 
        phydev->duplex = cmd->duplex;
 
+       phydev->mdix = cmd->eth_tp_mdix_ctrl;
+
        /* Restart the PHY */
        phy_start_aneg(phydev);
 
@@ -377,6 +379,7 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
        cmd->transceiver = phy_is_internal(phydev) ?
                XCVR_INTERNAL : XCVR_EXTERNAL;
        cmd->autoneg = phydev->autoneg;
+       cmd->eth_tp_mdix_ctrl = phydev->mdix;
 
        return 0;
 }
index 46530159256b3c8c09ad528caf38a5a7cfdb8295..f091d691cf6f1d1961f8e92d6107abdc7e9de9b0 100644 (file)
@@ -209,8 +209,6 @@ static int ks8995_reset(struct ks8995_switch *ks)
        return ks8995_start(ks);
 }
 
-/* ------------------------------------------------------------------------ */
-
 static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
        struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
 {
@@ -220,19 +218,9 @@ static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
        dev = container_of(kobj, struct device, kobj);
        ks8995 = dev_get_drvdata(dev);
 
-       if (unlikely(off > ks8995->regs_attr.size))
-               return 0;
-
-       if ((off + count) > ks8995->regs_attr.size)
-               count = ks8995->regs_attr.size - off;
-
-       if (unlikely(!count))
-               return count;
-
        return ks8995_read(ks8995, buf, off, count);
 }
 
-
 static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
        struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
 {
@@ -242,19 +230,9 @@ static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
        dev = container_of(kobj, struct device, kobj);
        ks8995 = dev_get_drvdata(dev);
 
-       if (unlikely(off >= ks8995->regs_attr.size))
-               return -EFBIG;
-
-       if ((off + count) > ks8995->regs_attr.size)
-               count = ks8995->regs_attr.size - off;
-
-       if (unlikely(!count))
-               return count;
-
        return ks8995_write(ks8995, buf, off, count);
 }
 
-
 static const struct bin_attribute ks8995_registers_attr = {
        .attr = {
                .name   = "registers",
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
new file mode 100644 (file)
index 0000000..91e1bec
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+ * Driver for Teranetics PHY
+ *
+ * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
+ *
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+
+MODULE_DESCRIPTION("Teranetics PHY driver");
+MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
+MODULE_LICENSE("GPL v2");
+
+#define PHY_ID_TN2020  0x00a19410
+#define MDIO_PHYXS_LNSTAT_SYNC0        0x0001
+#define MDIO_PHYXS_LNSTAT_SYNC1        0x0002
+#define MDIO_PHYXS_LNSTAT_SYNC2        0x0004
+#define MDIO_PHYXS_LNSTAT_SYNC3        0x0008
+#define MDIO_PHYXS_LNSTAT_ALIGN        0x1000
+
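+/* The link is usable only when all four PHY XS lanes report sync and
+ * the lanes are aligned, so the "lane ready" mask below combines the
+ * four SYNC bits with the ALIGN bit.
+ */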
+#define MDIO_PHYXS_LANE_READY  (MDIO_PHYXS_LNSTAT_SYNC0 | \
+                               MDIO_PHYXS_LNSTAT_SYNC1 | \
+                               MDIO_PHYXS_LNSTAT_SYNC2 | \
+                               MDIO_PHYXS_LNSTAT_SYNC3 | \
+                               MDIO_PHYXS_LNSTAT_ALIGN)
+
+static int teranetics_config_init(struct phy_device *phydev)
+{
+       phydev->supported = SUPPORTED_10000baseT_Full;
+       phydev->advertising = SUPPORTED_10000baseT_Full;
+
+       return 0;
+}
+
+static int teranetics_soft_reset(struct phy_device *phydev)
+{
+       return 0;
+}
+
+static int teranetics_aneg_done(struct phy_device *phydev)
+{
+       int reg;
+
+       /* The auto-negotiation state can only be checked on the copper
+        * port; when the fiber port is in use, simply report it as done.
+        */
+       if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) {
+               reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+               return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE);
+       }
+
+       return 1;
+}
+
+static int teranetics_config_aneg(struct phy_device *phydev)
+{
+       return 0;
+}
+
+static int teranetics_read_status(struct phy_device *phydev)
+{
+       int reg;
+
+       phydev->link = 1;
+
+       phydev->speed = SPEED_10000;
+       phydev->duplex = DUPLEX_FULL;
+
+       if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) {
+               reg = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_LNSTAT);
+               if (reg < 0 ||
+                   !((reg & MDIO_PHYXS_LANE_READY) == MDIO_PHYXS_LANE_READY)) {
+                       phydev->link = 0;
+                       return 0;
+               }
+
+               reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+               if (reg < 0 || !(reg & MDIO_STAT1_LSTATUS))
+                       phydev->link = 0;
+       }
+
+       return 0;
+}
+
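+/* Match on the PHY ID reported by MMD 3 (the clause 45 PCS device),
+ * where the TN2020 exposes its identifier, rather than on the usual
+ * phydev->phy_id field.
+ */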
+static int teranetics_match_phy_device(struct phy_device *phydev)
+{
+       return phydev->c45_ids.device_ids[3] == PHY_ID_TN2020;
+}
+
+static struct phy_driver teranetics_driver[] = {
+{
+       .phy_id         = PHY_ID_TN2020,
+       .phy_id_mask    = 0xffffffff,
+       .name           = "Teranetics TN2020",
+       .soft_reset     = teranetics_soft_reset,
+       .aneg_done      = teranetics_aneg_done,
+       .config_init    = teranetics_config_init,
+       .config_aneg    = teranetics_config_aneg,
+       .read_status    = teranetics_read_status,
+       .match_phy_device = teranetics_match_phy_device,
+       .driver         = { .owner = THIS_MODULE,},
+},
+};
+
+static int __init teranetics_init(void)
+{
+       return phy_drivers_register(teranetics_driver,
+                                   ARRAY_SIZE(teranetics_driver));
+}
+
+static void __exit teranetics_exit(void)
+{
+       return phy_drivers_unregister(teranetics_driver,
+                                     ARRAY_SIZE(teranetics_driver));
+}
+
+module_init(teranetics_init);
+module_exit(teranetics_exit);
+
+static struct mdio_device_id __maybe_unused teranetics_tbl[] = {
+       { PHY_ID_TN2020, 0xffffffff },
+       { }
+};
+
+MODULE_DEVICE_TABLE(mdio, teranetics_tbl);
index 06a039414628de9a9d91c73e9f8790c23fce71a3..976aa97042972880679ae61fa6fa9f90f12b51b1 100644 (file)
@@ -961,6 +961,7 @@ static const struct net_device_ops tap_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = tun_poll_controller,
 #endif
+       .ndo_features_check     = passthru_features_check,
 };
 
 static void tun_flow_init(struct tun_struct *tun)
index 7ba8d0885f120156c47f44884212a2fd73f604b9..1610b79ae3866725a12f9af8a2ed83255999217a 100644 (file)
@@ -106,6 +106,16 @@ config USB_RTL8152
          To compile this driver as a module, choose M here: the
          module will be called r8152.
 
+config USB_LAN78XX
+       tristate "Microchip LAN78XX Based USB Ethernet Adapters"
+       select MII
+       help
+         This option adds support for Microchip LAN78XX based USB 2
+         & USB 3 10/100/1000 Ethernet adapters.
+
+         To compile this driver as a module, choose M here: the
+         module will be called lan78xx.
+
 config USB_USBNET
        tristate "Multi-purpose USB Networking Framework"
        select MII
index e2797f1e1b31ee51f82c11d50b23e6bd274d29ab..cf6a0e610a7fcd8665ec93324997ed1db0486f69 100644 (file)
@@ -8,6 +8,7 @@ obj-$(CONFIG_USB_PEGASUS)       += pegasus.o
 obj-$(CONFIG_USB_RTL8150)      += rtl8150.o
 obj-$(CONFIG_USB_RTL8152)      += r8152.o
 obj-$(CONFIG_USB_HSO)          += hso.o
+obj-$(CONFIG_USB_LAN78XX)      += lan78xx.o
 obj-$(CONFIG_USB_NET_AX8817X)  += asix.o
 asix-y := asix_devices.o asix_common.o ax88172a.o
 obj-$(CONFIG_USB_NET_AX88179_178A)      += ax88179_178a.o
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
new file mode 100644 (file)
index 0000000..ec8bd34
--- /dev/null
@@ -0,0 +1,3530 @@
+/*
+ * Copyright (C) 2015 Microchip Technology
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/mdio.h>
+#include <net/ip6_checksum.h>
+#include "lan78xx.h"
+
+#define DRIVER_AUTHOR  "WOOJUNG HUH <woojung.huh@microchip.com>"
+#define DRIVER_DESC    "LAN78XX USB 3.0 Gigabit Ethernet Devices"
+#define DRIVER_NAME    "lan78xx"
+#define DRIVER_VERSION "1.0.0"
+
+#define TX_TIMEOUT_JIFFIES             (5 * HZ)
+#define THROTTLE_JIFFIES               (HZ / 8)
+#define UNLINK_TIMEOUT_MS              3
+
+#define RX_MAX_QUEUE_MEMORY            (60 * 1518)
+
+#define SS_USB_PKT_SIZE                        (1024)
+#define HS_USB_PKT_SIZE                        (512)
+#define FS_USB_PKT_SIZE                        (64)
+
+#define MAX_RX_FIFO_SIZE               (12 * 1024)
+#define MAX_TX_FIFO_SIZE               (12 * 1024)
+#define DEFAULT_BURST_CAP_SIZE         (MAX_TX_FIFO_SIZE)
+#define DEFAULT_BULK_IN_DELAY          (0x0800)
+#define MAX_SINGLE_PACKET_SIZE         (9000)
+#define DEFAULT_TX_CSUM_ENABLE         (true)
+#define DEFAULT_RX_CSUM_ENABLE         (true)
+#define DEFAULT_TSO_CSUM_ENABLE                (true)
+#define DEFAULT_VLAN_FILTER_ENABLE     (true)
+#define INTERNAL_PHY_ID                        (2)     /* 2: GMII */
+#define TX_OVERHEAD                    (8)
+#define RXW_PADDING                    2
+
+#define LAN78XX_USB_VENDOR_ID          (0x0424)
+#define LAN7800_USB_PRODUCT_ID         (0x7800)
+#define LAN7850_USB_PRODUCT_ID         (0x7850)
+#define LAN78XX_EEPROM_MAGIC           (0x78A5)
+#define LAN78XX_OTP_MAGIC              (0x78F3)
+
+#define        MII_READ                        1
+#define        MII_WRITE                       0
+
+#define EEPROM_INDICATOR               (0xA5)
+#define EEPROM_MAC_OFFSET              (0x01)
+#define MAX_EEPROM_SIZE                        512
+#define OTP_INDICATOR_1                        (0xF3)
+#define OTP_INDICATOR_2                        (0xF7)
+
+#define WAKE_ALL                       (WAKE_PHY | WAKE_UCAST | \
+                                        WAKE_MCAST | WAKE_BCAST | \
+                                        WAKE_ARP | WAKE_MAGIC)
+
+/* USB related defines */
+#define BULK_IN_PIPE                   1
+#define BULK_OUT_PIPE                  2
+
+/* default autosuspend delay (msec) */
+#define DEFAULT_AUTOSUSPEND_DELAY      (10 * 1000)
+
+static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
+       "RX FCS Errors",
+       "RX Alignment Errors",
+       "Rx Fragment Errors",
+       "RX Jabber Errors",
+       "RX Undersize Frame Errors",
+       "RX Oversize Frame Errors",
+       "RX Dropped Frames",
+       "RX Unicast Byte Count",
+       "RX Broadcast Byte Count",
+       "RX Multicast Byte Count",
+       "RX Unicast Frames",
+       "RX Broadcast Frames",
+       "RX Multicast Frames",
+       "RX Pause Frames",
+       "RX 64 Byte Frames",
+       "RX 65 - 127 Byte Frames",
+       "RX 128 - 255 Byte Frames",
+       "RX 256 - 511 Bytes Frames",
+       "RX 512 - 1023 Byte Frames",
+       "RX 1024 - 1518 Byte Frames",
+       "RX Greater 1518 Byte Frames",
+       "EEE RX LPI Transitions",
+       "EEE RX LPI Time",
+       "TX FCS Errors",
+       "TX Excess Deferral Errors",
+       "TX Carrier Errors",
+       "TX Bad Byte Count",
+       "TX Single Collisions",
+       "TX Multiple Collisions",
+       "TX Excessive Collision",
+       "TX Late Collisions",
+       "TX Unicast Byte Count",
+       "TX Broadcast Byte Count",
+       "TX Multicast Byte Count",
+       "TX Unicast Frames",
+       "TX Broadcast Frames",
+       "TX Multicast Frames",
+       "TX Pause Frames",
+       "TX 64 Byte Frames",
+       "TX 65 - 127 Byte Frames",
+       "TX 128 - 255 Byte Frames",
+       "TX 256 - 511 Bytes Frames",
+       "TX 512 - 1023 Byte Frames",
+       "TX 1024 - 1518 Byte Frames",
+       "TX Greater 1518 Byte Frames",
+       "EEE TX LPI Transitions",
+       "EEE TX LPI Time",
+};
+
+struct lan78xx_statstage {
+       u32 rx_fcs_errors;
+       u32 rx_alignment_errors;
+       u32 rx_fragment_errors;
+       u32 rx_jabber_errors;
+       u32 rx_undersize_frame_errors;
+       u32 rx_oversize_frame_errors;
+       u32 rx_dropped_frames;
+       u32 rx_unicast_byte_count;
+       u32 rx_broadcast_byte_count;
+       u32 rx_multicast_byte_count;
+       u32 rx_unicast_frames;
+       u32 rx_broadcast_frames;
+       u32 rx_multicast_frames;
+       u32 rx_pause_frames;
+       u32 rx_64_byte_frames;
+       u32 rx_65_127_byte_frames;
+       u32 rx_128_255_byte_frames;
+       u32 rx_256_511_bytes_frames;
+       u32 rx_512_1023_byte_frames;
+       u32 rx_1024_1518_byte_frames;
+       u32 rx_greater_1518_byte_frames;
+       u32 eee_rx_lpi_transitions;
+       u32 eee_rx_lpi_time;
+       u32 tx_fcs_errors;
+       u32 tx_excess_deferral_errors;
+       u32 tx_carrier_errors;
+       u32 tx_bad_byte_count;
+       u32 tx_single_collisions;
+       u32 tx_multiple_collisions;
+       u32 tx_excessive_collision;
+       u32 tx_late_collisions;
+       u32 tx_unicast_byte_count;
+       u32 tx_broadcast_byte_count;
+       u32 tx_multicast_byte_count;
+       u32 tx_unicast_frames;
+       u32 tx_broadcast_frames;
+       u32 tx_multicast_frames;
+       u32 tx_pause_frames;
+       u32 tx_64_byte_frames;
+       u32 tx_65_127_byte_frames;
+       u32 tx_128_255_byte_frames;
+       u32 tx_256_511_bytes_frames;
+       u32 tx_512_1023_byte_frames;
+       u32 tx_1024_1518_byte_frames;
+       u32 tx_greater_1518_byte_frames;
+       u32 eee_tx_lpi_transitions;
+       u32 eee_tx_lpi_time;
+};
+
+struct lan78xx_net;
+
+struct lan78xx_priv {
+       struct lan78xx_net *dev;
+       u32 rfe_ctl;
+       u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
+       u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
+       u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
+       struct mutex dataport_mutex; /* for dataport access */
+       spinlock_t rfe_ctl_lock; /* for rfe register access */
+       struct work_struct set_multicast;
+       struct work_struct set_vlan;
+       u32 wol;
+};
+
+enum skb_state {
+       illegal = 0,
+       tx_start,
+       tx_done,
+       rx_start,
+       rx_done,
+       rx_cleanup,
+       unlink_start
+};
+
+struct skb_data {              /* skb->cb is one of these */
+       struct urb *urb;
+       struct lan78xx_net *dev;
+       enum skb_state state;
+       size_t length;
+};
+
+struct usb_context {
+       struct usb_ctrlrequest req;
+       struct lan78xx_net *dev;
+};
+
+#define EVENT_TX_HALT                  0
+#define EVENT_RX_HALT                  1
+#define EVENT_RX_MEMORY                        2
+#define EVENT_STS_SPLIT                        3
+#define EVENT_LINK_RESET               4
+#define EVENT_RX_PAUSED                        5
+#define EVENT_DEV_WAKING               6
+#define EVENT_DEV_ASLEEP               7
+#define EVENT_DEV_OPEN                 8
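+/* The EVENT_* values are bit numbers in lan78xx_net.flags, set with
+ * lan78xx_defer_kevent() and serviced later from process context.
+ */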
+
+struct lan78xx_net {
+       struct net_device       *net;
+       struct usb_device       *udev;
+       struct usb_interface    *intf;
+       void                    *driver_priv;
+
+       int                     rx_qlen;
+       int                     tx_qlen;
+       struct sk_buff_head     rxq;
+       struct sk_buff_head     txq;
+       struct sk_buff_head     done;
+       struct sk_buff_head     rxq_pause;
+       struct sk_buff_head     txq_pend;
+
+       struct tasklet_struct   bh;
+       struct delayed_work     wq;
+
+       struct usb_host_endpoint *ep_blkin;
+       struct usb_host_endpoint *ep_blkout;
+       struct usb_host_endpoint *ep_intr;
+
+       int                     msg_enable;
+
+       struct urb              *urb_intr;
+       struct usb_anchor       deferred;
+
+       struct mutex            phy_mutex; /* for phy access */
+       unsigned                pipe_in, pipe_out, pipe_intr;
+
+       u32                     hard_mtu;       /* count any extra framing */
+       size_t                  rx_urb_size;    /* size for rx urbs */
+
+       unsigned long           flags;
+
+       wait_queue_head_t       *wait;
+       unsigned char           suspend_count;
+
+       unsigned                maxpacket;
+       struct timer_list       delay;
+
+       unsigned long           data[5];
+       struct mii_if_info      mii;
+
+       int                     link_on;
+       u8                      mdix_ctrl;
+};
+
+/* use ethtool to change the level for any given device */
+static int msg_level = -1;
+module_param(msg_level, int, 0);
+MODULE_PARM_DESC(msg_level, "Override default message level");
+
+static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
+{
+       u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
+       int ret;
+
+       BUG_ON(!dev);
+
+       if (!buf)
+               return -ENOMEM;
+
+       ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+                             USB_VENDOR_REQUEST_READ_REGISTER,
+                             USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+                             0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
+       if (likely(ret >= 0)) {
+               le32_to_cpus(buf);
+               *data = *buf;
+       } else {
+               netdev_warn(dev->net,
+                           "Failed to read register index 0x%08x. ret = %d",
+                           index, ret);
+       }
+
+       kfree(buf);
+
+       return ret;
+}
+
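+/* Device registers are accessed through USB vendor control transfers.
+ * The 4-byte transfer buffer is kmalloc'd rather than kept on the stack
+ * because the USB core requires DMA-able transfer buffers.
+ */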
+static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
+{
+       u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
+       int ret;
+
+       BUG_ON(!dev);
+
+       if (!buf)
+               return -ENOMEM;
+
+       *buf = data;
+       cpu_to_le32s(buf);
+
+       ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+                             USB_VENDOR_REQUEST_WRITE_REGISTER,
+                             USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+                             0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
+       if (unlikely(ret < 0)) {
+               netdev_warn(dev->net,
+                           "Failed to write register index 0x%08x. ret = %d",
+                           index, ret);
+       }
+
+       kfree(buf);
+
+       return ret;
+}
+
+static int lan78xx_read_stats(struct lan78xx_net *dev,
+                             struct lan78xx_statstage *data)
+{
+       int ret = 0;
+       int i;
+       struct lan78xx_statstage *stats;
+       u32 *src;
+       u32 *dst;
+
+       BUG_ON(!dev);
+       BUG_ON(!data);
+       BUG_ON(sizeof(struct lan78xx_statstage) != 0xBC);
+
+       stats = kmalloc(sizeof(*stats), GFP_KERNEL);
+       if (!stats)
+               return -ENOMEM;
+
+       ret = usb_control_msg(dev->udev,
+                             usb_rcvctrlpipe(dev->udev, 0),
+                             USB_VENDOR_REQUEST_GET_STATS,
+                             USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+                             0,
+                             0,
+                             (void *)stats,
+                             sizeof(*stats),
+                             USB_CTRL_SET_TIMEOUT);
+       if (likely(ret >= 0)) {
+               src = (u32 *)stats;
+               dst = (u32 *)data;
+               for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
+                       le32_to_cpus(&src[i]);
+                       dst[i] = src[i];
+               }
+       } else {
+               netdev_warn(dev->net,
+                           "Failed to read stat ret = 0x%x", ret);
+       }
+
+       kfree(stats);
+
+       return ret;
+}
+
+/* Loop until the read completes or times out; called with phy_mutex held */
+static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
+{
+       unsigned long start_time = jiffies;
+       u32 val;
+       int ret;
+
+       do {
+               ret = lan78xx_read_reg(dev, MII_ACC, &val);
+               if (unlikely(ret < 0))
+                       return -EIO;
+
+               if (!(val & MII_ACC_MII_BUSY_))
+                       return 0;
+       } while (!time_after(jiffies, start_time + HZ));
+
+       return -EIO;
+}
+
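+/* Compose a MII_ACC command word: the PHY address and register index go
+ * into their bit fields, the direction bit selects read or write, and
+ * BUSY starts the cycle, e.g. mii_access(INTERNAL_PHY_ID, MII_BMSR,
+ * MII_READ) for a BMSR read of the internal PHY.
+ */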
+static inline u32 mii_access(int id, int index, int read)
+{
+       u32 ret;
+
+       ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
+       ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
+       if (read)
+               ret |= MII_ACC_MII_READ_;
+       else
+               ret |= MII_ACC_MII_WRITE_;
+       ret |= MII_ACC_MII_BUSY_;
+
+       return ret;
+}
+
+static int lan78xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       u32 val, addr;
+       int ret;
+
+       ret = usb_autopm_get_interface(dev->intf);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&dev->phy_mutex);
+
+       /* confirm MII not busy */
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       /* set the address, index & direction (read from PHY) */
+       phy_id &= dev->mii.phy_id_mask;
+       idx &= dev->mii.reg_num_mask;
+       addr = mii_access(phy_id, idx, MII_READ);
+       ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       ret = lan78xx_read_reg(dev, MII_DATA, &val);
+       if (ret < 0)
+               goto done;
+
+       ret = (int)(val & 0xFFFF);
+
+done:
+       mutex_unlock(&dev->phy_mutex);
+       usb_autopm_put_interface(dev->intf);
+       return ret;
+}
+
+static void lan78xx_mdio_write(struct net_device *netdev, int phy_id,
+                              int idx, int regval)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       u32 val, addr;
+       int ret;
+
+       if (usb_autopm_get_interface(dev->intf) < 0)
+               return;
+
+       mutex_lock(&dev->phy_mutex);
+
+       /* confirm MII not busy */
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       val = regval;
+       ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+       /* set the address, index & direction (write to PHY) */
+       phy_id &= dev->mii.phy_id_mask;
+       idx &= dev->mii.reg_num_mask;
+       addr = mii_access(phy_id, idx, MII_WRITE);
+       ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+done:
+       mutex_unlock(&dev->phy_mutex);
+       usb_autopm_put_interface(dev->intf);
+}
+
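+/* MMD registers are reached indirectly through the MII registers: write
+ * the MMD device address to PHY_MMD_CTL, the register index to
+ * PHY_MMD_REG_DATA, switch PHY_MMD_CTL to data mode (OP_DNI), then move
+ * the value through PHY_MMD_REG_DATA.
+ */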
+static void lan78xx_mmd_write(struct net_device *netdev, int phy_id,
+                             int mmddev, int mmdidx, int regval)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       u32 val, addr;
+       int ret;
+
+       if (usb_autopm_get_interface(dev->intf) < 0)
+               return;
+
+       mutex_lock(&dev->phy_mutex);
+
+       /* confirm MII not busy */
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       mmddev &= 0x1F;
+
+       /* set up device address for MMD */
+       ret = lan78xx_write_reg(dev, MII_DATA, mmddev);
+
+       phy_id &= dev->mii.phy_id_mask;
+       addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+       ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       /* select register of MMD */
+       val = mmdidx;
+       ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+       phy_id &= dev->mii.phy_id_mask;
+       addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
+       ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       /* select register data for MMD */
+       val = PHY_MMD_CTRL_OP_DNI_ | mmddev;
+       ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+       phy_id &= dev->mii.phy_id_mask;
+       addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+       ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       /* write to MMD */
+       val = regval;
+       ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+       phy_id &= dev->mii.phy_id_mask;
+       addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
+       ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+done:
+       mutex_unlock(&dev->phy_mutex);
+       usb_autopm_put_interface(dev->intf);
+}
+
+static int lan78xx_mmd_read(struct net_device *netdev, int phy_id,
+                           int mmddev, int mmdidx)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       u32 val, addr;
+       int ret;
+
+       ret = usb_autopm_get_interface(dev->intf);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&dev->phy_mutex);
+
+       /* confirm MII not busy */
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       /* set up device address for MMD */
+       ret = lan78xx_write_reg(dev, MII_DATA, mmddev);
+
+       phy_id &= dev->mii.phy_id_mask;
+       addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+       ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       /* select register of MMD */
+       val = mmdidx;
+       ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+       phy_id &= dev->mii.phy_id_mask;
+       addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
+       ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       /* select register data for MMD */
+       val = PHY_MMD_CTRL_OP_DNI_ | mmddev;
+       ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+       phy_id &= dev->mii.phy_id_mask;
+       addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+       ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       /* set the address, index & direction (read from PHY) */
+       phy_id &= dev->mii.phy_id_mask;
+       addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_READ);
+       ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       /* read from MMD */
+       ret = lan78xx_read_reg(dev, MII_DATA, &val);
+       if (ret < 0)
+               goto done;
+
+       ret = (int)(val & 0xFFFF);
+
+done:
+       mutex_unlock(&dev->phy_mutex);
+       usb_autopm_put_interface(dev->intf);
+       return ret;
+}
+
+static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
+{
+       unsigned long start_time = jiffies;
+       u32 val;
+       int ret;
+
+       do {
+               ret = lan78xx_read_reg(dev, E2P_CMD, &val);
+               if (unlikely(ret < 0))
+                       return -EIO;
+
+               if (!(val & E2P_CMD_EPC_BUSY_) ||
+                   (val & E2P_CMD_EPC_TIMEOUT_))
+                       break;
+               usleep_range(40, 100);
+       } while (!time_after(jiffies, start_time + HZ));
+
+       if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
+               netdev_warn(dev->net, "EEPROM read operation timeout");
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
+{
+       unsigned long start_time = jiffies;
+       u32 val;
+       int ret;
+
+       do {
+               ret = lan78xx_read_reg(dev, E2P_CMD, &val);
+               if (unlikely(ret < 0))
+                       return -EIO;
+
+               if (!(val & E2P_CMD_EPC_BUSY_))
+                       return 0;
+
+               usleep_range(40, 100);
+       } while (!time_after(jiffies, start_time + HZ));
+
+       netdev_warn(dev->net, "EEPROM is busy");
+       return -EIO;
+}
+
+static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
+                                  u32 length, u8 *data)
+{
+       u32 val;
+       int i, ret;
+
+       BUG_ON(!dev);
+       BUG_ON(!data);
+
+       ret = lan78xx_eeprom_confirm_not_busy(dev);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < length; i++) {
+               val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
+               val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
+               ret = lan78xx_write_reg(dev, E2P_CMD, val);
+               if (unlikely(ret < 0))
+                       return -EIO;
+
+               ret = lan78xx_wait_eeprom(dev);
+               if (ret < 0)
+                       return ret;
+
+               ret = lan78xx_read_reg(dev, E2P_DATA, &val);
+               if (unlikely(ret < 0))
+                       return -EIO;
+
+               data[i] = val & 0xFF;
+               offset++;
+       }
+
+       return 0;
+}
+
+static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
+                              u32 length, u8 *data)
+{
+       u8 sig;
+       int ret;
+
+       ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
+       if ((ret == 0) && (sig == EEPROM_INDICATOR))
+               ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
+       else
+               ret = -EINVAL;
+
+       return ret;
+}
+
+static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
+                                   u32 length, u8 *data)
+{
+       u32 val;
+       int i, ret;
+
+       BUG_ON(!dev);
+       BUG_ON(!data);
+
+       ret = lan78xx_eeprom_confirm_not_busy(dev);
+       if (ret)
+               return ret;
+
+       /* Issue write/erase enable command */
+       val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
+       ret = lan78xx_write_reg(dev, E2P_CMD, val);
+       if (unlikely(ret < 0))
+               return -EIO;
+
+       ret = lan78xx_wait_eeprom(dev);
+       if (ret < 0)
+               return ret;
+
+       for (i = 0; i < length; i++) {
+               /* Fill data register */
+               val = data[i];
+               ret = lan78xx_write_reg(dev, E2P_DATA, val);
+               if (ret < 0)
+                       return ret;
+
+               /* Send "write" command */
+               val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
+               val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
+               ret = lan78xx_write_reg(dev, E2P_CMD, val);
+               if (ret < 0)
+                       return ret;
+
+               ret = lan78xx_wait_eeprom(dev);
+               if (ret < 0)
+                       return ret;
+
+               offset++;
+       }
+
+       return 0;
+}
+
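+/* Raw OTP reads: if the OTP block is held in power-down, clear
+ * OTP_PWR_DN and wait for it to take effect, then for each byte program
+ * the address registers, issue a READ via OTP_FUNC_CMD, kick OTP_CMD_GO
+ * and poll OTP_STATUS until BUSY clears.
+ */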
+static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
+                               u32 length, u8 *data)
+{
+       int i;
+       int ret;
+       u32 buf;
+       unsigned long timeout;
+
+       ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+
+       if (buf & OTP_PWR_DN_PWRDN_N_) {
+               /* clear it and wait to be cleared */
+               ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+
+               timeout = jiffies + HZ;
+               do {
+                       usleep_range(1, 10);
+                       ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+                       if (time_after(jiffies, timeout)) {
+                               netdev_warn(dev->net,
+                                           "timeout on OTP_PWR_DN");
+                               return -EIO;
+                       }
+               } while (buf & OTP_PWR_DN_PWRDN_N_);
+       }
+
+       for (i = 0; i < length; i++) {
+               ret = lan78xx_write_reg(dev, OTP_ADDR1,
+                                       ((offset + i) >> 8) & OTP_ADDR1_15_11);
+               ret = lan78xx_write_reg(dev, OTP_ADDR2,
+                                       ((offset + i) & OTP_ADDR2_10_3));
+
+               ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+               ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+
+               timeout = jiffies + HZ;
+               do {
+                       udelay(1);
+                       ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+                       if (time_after(jiffies, timeout)) {
+                               netdev_warn(dev->net,
+                                           "timeout on OTP_STATUS");
+                               return -EIO;
+                       }
+               } while (buf & OTP_STATUS_BUSY_);
+
+               ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
+
+               data[i] = (u8)(buf & 0xFF);
+       }
+
+       return 0;
+}
+
+static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
+                           u32 length, u8 *data)
+{
+       u8 sig;
+       int ret;
+
+       ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
+
+       if (ret == 0) {
+               if (sig == OTP_INDICATOR_2)
+                       offset += 0x100;
+               else if (sig != OTP_INDICATOR_1)
+                       return -EINVAL;
+
+               ret = lan78xx_read_raw_otp(dev, offset, length, data);
+       }
+
+       return ret;
+}
+
+static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
+{
+       int i, ret;
+
+       for (i = 0; i < 100; i++) {
+               u32 dp_sel;
+
+               ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
+               if (unlikely(ret < 0))
+                       return -EIO;
+
+               if (dp_sel & DP_SEL_DPRDY_)
+                       return 0;
+
+               usleep_range(40, 100);
+       }
+
+       netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
+
+       return -EIO;
+}
+
+static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
+                                 u32 addr, u32 length, u32 *buf)
+{
+       struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+       u32 dp_sel;
+       int i, ret;
+
+       if (usb_autopm_get_interface(dev->intf) < 0)
+               return 0;
+
+       mutex_lock(&pdata->dataport_mutex);
+
+       ret = lan78xx_dataport_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
+
+       dp_sel &= ~DP_SEL_RSEL_MASK_;
+       dp_sel |= ram_select;
+       ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
+
+       for (i = 0; i < length; i++) {
+               ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
+
+               ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
+
+               ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
+
+               ret = lan78xx_dataport_wait_not_busy(dev);
+               if (ret < 0)
+                       goto done;
+       }
+
+done:
+       mutex_unlock(&pdata->dataport_mutex);
+       usb_autopm_put_interface(dev->intf);
+
+       return ret;
+}
+
+static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
+                                   int index, u8 addr[ETH_ALEN])
+{
+       u32     temp;
+
+       if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
+               temp = addr[3];
+               temp = addr[2] | (temp << 8);
+               temp = addr[1] | (temp << 8);
+               temp = addr[0] | (temp << 8);
+               pdata->pfilter_table[index][1] = temp;
+               temp = addr[5];
+               temp = addr[4] | (temp << 8);
+               temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
+               pdata->pfilter_table[index][0] = temp;
+       }
+}
+
+/* returns hash bit number for given MAC address */
+static inline u32 lan78xx_hash(char addr[ETH_ALEN])
+{
+       return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
+}
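+
+/* The 9-bit result selects word (bitnum / 32), bit (bitnum % 32) of the
+ * 512-bit pdata->mchash_table, as consumed in lan78xx_set_multicast().
+ */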
+
+static void lan78xx_deferred_multicast_write(struct work_struct *param)
+{
+       struct lan78xx_priv *pdata =
+                       container_of(param, struct lan78xx_priv, set_multicast);
+       struct lan78xx_net *dev = pdata->dev;
+       int i;
+       int ret;
+
+       netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
+                 pdata->rfe_ctl);
+
+       lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
+                              DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
+
+       for (i = 1; i < NUM_OF_MAF; i++) {
+               ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
+               ret = lan78xx_write_reg(dev, MAF_LO(i),
+                                       pdata->pfilter_table[i][1]);
+               ret = lan78xx_write_reg(dev, MAF_HI(i),
+                                       pdata->pfilter_table[i][0]);
+       }
+
+       ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+}
+
+static void lan78xx_set_multicast(struct net_device *netdev)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+       unsigned long flags;
+       int i;
+
+       spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
+
+       pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
+                           RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
+
+       for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
+               pdata->mchash_table[i] = 0;
+
+       /* pfilter_table[0] has own HW address */
+       for (i = 1; i < NUM_OF_MAF; i++) {
+               pdata->pfilter_table[i][0] = 0;
+               pdata->pfilter_table[i][1] = 0;
+       }
+
+       pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
+
+       if (dev->net->flags & IFF_PROMISC) {
+               netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
+               pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
+       } else {
+               if (dev->net->flags & IFF_ALLMULTI) {
+                       netif_dbg(dev, drv, dev->net,
+                                 "receive all multicast enabled");
+                       pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
+               }
+       }
+
+       if (netdev_mc_count(dev->net)) {
+               struct netdev_hw_addr *ha;
+               int i;
+
+               netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
+
+               pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
+
+               i = 1;
+               netdev_for_each_mc_addr(ha, netdev) {
+                       /* set first 32 into Perfect Filter */
+                       if (i < 33) {
+                               lan78xx_set_addr_filter(pdata, i, ha->addr);
+                       } else {
+                               u32 bitnum = lan78xx_hash(ha->addr);
+
+                               pdata->mchash_table[bitnum / 32] |=
+                                                       (1 << (bitnum % 32));
+                               pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
+                       }
+                       i++;
+               }
+       }
+
+       spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
+
+       /* defer register writes to a sleepable context */
+       schedule_work(&pdata->set_multicast);
+}
+
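+/* Resolve the pause configuration from the local and partner
+ * advertisements and program the MAC flow control and FCT thresholds;
+ * 0x817 and 0x211 appear to be speed-dependent FIFO thresholds for
+ * SuperSpeed and high speed, respectively.
+ */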
+static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
+                                     u16 lcladv, u16 rmtadv)
+{
+       u32 flow = 0, fct_flow = 0;
+       int ret;
+
+       u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+
+       if (cap & FLOW_CTRL_TX)
+               flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);
+
+       if (cap & FLOW_CTRL_RX)
+               flow |= FLOW_CR_RX_FCEN_;
+
+       if (dev->udev->speed == USB_SPEED_SUPER)
+               fct_flow = 0x817;
+       else if (dev->udev->speed == USB_SPEED_HIGH)
+               fct_flow = 0x211;
+
+       netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
+                 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
+                 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
+
+       ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
+
+       /* threshold value should be set before enabling flow */
+       ret = lan78xx_write_reg(dev, FLOW, flow);
+
+       return 0;
+}
+
+static int lan78xx_link_reset(struct lan78xx_net *dev)
+{
+       struct mii_if_info *mii = &dev->mii;
+       struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+       u16 ladv, radv;
+       int ret;
+       u32 buf;
+
+       /* clear the (VTSE) PHY interrupt status */
+       ret = lan78xx_mdio_read(dev->net, mii->phy_id, PHY_VTSE_INT_STS);
+       if (unlikely(ret < 0))
+               return -EIO;
+
+       /* clear LAN78xx interrupt status */
+       ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
+       if (unlikely(ret < 0))
+               return -EIO;
+
+       if (!mii_link_ok(mii) && dev->link_on) {
+               dev->link_on = false;
+               netif_carrier_off(dev->net);
+
+               /* reset MAC */
+               ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+               if (unlikely(ret < 0))
+                       return -EIO;
+               buf |= MAC_CR_RST_;
+               ret = lan78xx_write_reg(dev, MAC_CR, buf);
+               if (unlikely(ret < 0))
+                       return -EIO;
+       } else if (mii_link_ok(mii) && !dev->link_on) {
+               dev->link_on = true;
+
+               mii_check_media(mii, 1, 1);
+               mii_ethtool_gset(&dev->mii, &ecmd);
+
+               mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS);
+
+               if (dev->udev->speed == USB_SPEED_SUPER) {
+                       if (ethtool_cmd_speed(&ecmd) == 1000) {
+                               /* disable U2 */
+                               ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+                               buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
+                               ret = lan78xx_write_reg(dev, USB_CFG1, buf);
+                               /* enable U1 */
+                               ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+                               buf |= USB_CFG1_DEV_U1_INIT_EN_;
+                               ret = lan78xx_write_reg(dev, USB_CFG1, buf);
+                       } else {
+                               /* enable U1 & U2 */
+                               ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+                               buf |= USB_CFG1_DEV_U2_INIT_EN_;
+                               buf |= USB_CFG1_DEV_U1_INIT_EN_;
+                               ret = lan78xx_write_reg(dev, USB_CFG1, buf);
+                       }
+               }
+
+               ladv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
+               if (unlikely(ladv < 0))
+                       return -EIO;
+
+               radv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
+               if (unlikely(radv < 0))
+                       return -EIO;
+
+               netif_dbg(dev, link, dev->net,
+                         "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
+                         ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);
+
+               ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
+               netif_carrier_on(dev->net);
+       }
+
+       return ret;
+}
+
+/* Some work can't be done in tasklets, so it is deferred to keventd.
+ *
+ * NOTE: there is an annoying asymmetry: if the work is already active,
+ * schedule_work() fails, but tasklet_schedule() doesn't. Hopefully the
+ * failure is rare.
+ */
+void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
+{
+       set_bit(work, &dev->flags);
+       if (!schedule_delayed_work(&dev->wq, 0))
+               netdev_err(dev->net, "kevent %d may have been dropped\n", work);
+}
+
+static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
+{
+       u32 intdata;
+
+       if (urb->actual_length != 4) {
+               netdev_warn(dev->net,
+                           "unexpected urb length %d", urb->actual_length);
+               return;
+       }
+
+       memcpy(&intdata, urb->transfer_buffer, 4);
+       le32_to_cpus(&intdata);
+
+       if (intdata & INT_ENP_PHY_INT) {
+               netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
+               lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
+       } else {
+               netdev_warn(dev->net,
+                           "unexpected interrupt: 0x%08x\n", intdata);
+       }
+}
+
+static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
+{
+       return MAX_EEPROM_SIZE;
+}
+
+static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
+                                     struct ethtool_eeprom *ee, u8 *data)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+
+       ee->magic = LAN78XX_EEPROM_MAGIC;
+
+       return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
+}
+
+static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
+                                     struct ethtool_eeprom *ee, u8 *data)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+
+       /* Allow entire eeprom update only */
+       if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
+           (ee->offset == 0) &&
+           (ee->len == 512) &&
+           (data[0] == EEPROM_INDICATOR))
+               return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
+       else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
+                (ee->offset == 0) &&
+                (ee->len == 512) &&
+                (data[0] == OTP_INDICATOR_1))
+               return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
+
+       return -EINVAL;
+}
+
+static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
+                               u8 *data)
+{
+       if (stringset == ETH_SS_STATS)
+               memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
+}
+
+static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
+{
+       if (sset == ETH_SS_STATS)
+               return ARRAY_SIZE(lan78xx_gstrings);
+       else
+               return -EOPNOTSUPP;
+}
+
+static void lan78xx_get_stats(struct net_device *netdev,
+                             struct ethtool_stats *stats, u64 *data)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       struct lan78xx_statstage lan78xx_stat;
+       u32 *p;
+       int i;
+
+       if (usb_autopm_get_interface(dev->intf) < 0)
+               return;
+
+       if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
+               p = (u32 *)&lan78xx_stat;
+               for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
+                       data[i] = p[i];
+       }
+
+       usb_autopm_put_interface(dev->intf);
+}
+
+static void lan78xx_get_wol(struct net_device *netdev,
+                           struct ethtool_wolinfo *wol)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       int ret;
+       u32 buf;
+       struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+
+       if (usb_autopm_get_interface(dev->intf) < 0)
+               return;
+
+       ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
+       if (unlikely(ret < 0)) {
+               wol->supported = 0;
+               wol->wolopts = 0;
+       } else {
+               if (buf & USB_CFG_RMT_WKP_) {
+                       wol->supported = WAKE_ALL;
+                       wol->wolopts = pdata->wol;
+               } else {
+                       wol->supported = 0;
+                       wol->wolopts = 0;
+               }
+       }
+
+       usb_autopm_put_interface(dev->intf);
+}
+
+static int lan78xx_set_wol(struct net_device *netdev,
+                          struct ethtool_wolinfo *wol)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+       int ret;
+
+       ret = usb_autopm_get_interface(dev->intf);
+       if (ret < 0)
+               return ret;
+
+       pdata->wol = 0;
+       if (wol->wolopts & WAKE_UCAST)
+               pdata->wol |= WAKE_UCAST;
+       if (wol->wolopts & WAKE_MCAST)
+               pdata->wol |= WAKE_MCAST;
+       if (wol->wolopts & WAKE_BCAST)
+               pdata->wol |= WAKE_BCAST;
+       if (wol->wolopts & WAKE_MAGIC)
+               pdata->wol |= WAKE_MAGIC;
+       if (wol->wolopts & WAKE_PHY)
+               pdata->wol |= WAKE_PHY;
+       if (wol->wolopts & WAKE_ARP)
+               pdata->wol |= WAKE_ARP;
+
+       device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
+
+       usb_autopm_put_interface(dev->intf);
+
+       return ret;
+}
+
+static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+       int ret;
+       u32 buf;
+       u32 adv, lpadv;
+
+       ret = usb_autopm_get_interface(dev->intf);
+       if (ret < 0)
+               return ret;
+
+       ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+       if (buf & MAC_CR_EEE_EN_) {
+               buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
+                                      PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT);
+               adv = mmd_eee_adv_to_ethtool_adv_t(buf);
+               buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
+                                      PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
+               lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
+
+               edata->eee_enabled = true;
+               edata->supported = true;
+               edata->eee_active = !!(adv & lpadv);
+               edata->advertised = adv;
+               edata->lp_advertised = lpadv;
+               edata->tx_lpi_enabled = true;
+               /* EEE_TX_LPI_REQ_DLY and tx_lpi_timer use the same usec unit */
+               ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
+               edata->tx_lpi_timer = buf;
+       } else {
+               buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
+                                      PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
+               lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
+
+               edata->eee_enabled = false;
+               edata->eee_active = false;
+               edata->supported = false;
+               edata->advertised = 0;
+               edata->lp_advertised = lpadv;
+               edata->tx_lpi_enabled = false;
+               edata->tx_lpi_timer = 0;
+       }
+
+       usb_autopm_put_interface(dev->intf);
+
+       return 0;
+}
+
+static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+       int ret;
+       u32 buf;
+
+       ret = usb_autopm_get_interface(dev->intf);
+       if (ret < 0)
+               return ret;
+
+       if (edata->eee_enabled) {
+               ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+               buf |= MAC_CR_EEE_EN_;
+               ret = lan78xx_write_reg(dev, MAC_CR, buf);
+
+               buf = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+               lan78xx_mmd_write(dev->net, dev->mii.phy_id,
+                                 PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT, buf);
+       } else {
+               ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+               buf &= ~MAC_CR_EEE_EN_;
+               ret = lan78xx_write_reg(dev, MAC_CR, buf);
+       }
+
+       usb_autopm_put_interface(dev->intf);
+
+       return 0;
+}
+
+static u32 lan78xx_get_link(struct net_device *net)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+
+       return mii_link_ok(&dev->mii);
+}
+
+static int lan78xx_nway_reset(struct net_device *net)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+
+       if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
+               return -EOPNOTSUPP;
+
+       return mii_nway_restart(&dev->mii);
+}
+
+static void lan78xx_get_drvinfo(struct net_device *net,
+                               struct ethtool_drvinfo *info)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+
+       strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+       strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
+       usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
+}
+
+static u32 lan78xx_get_msglevel(struct net_device *net)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+
+       return dev->msg_enable;
+}
+
+static void lan78xx_set_msglevel(struct net_device *net, u32 level)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+
+       dev->msg_enable = level;
+}
+
+static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+       struct mii_if_info *mii = &dev->mii;
+       int ret;
+       int buf;
+
+       if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
+               return -EOPNOTSUPP;
+
+       ret = usb_autopm_get_interface(dev->intf);
+       if (ret < 0)
+               return ret;
+
+       ret = mii_ethtool_gset(&dev->mii, cmd);
+
+       mii->mdio_write(mii->dev, mii->phy_id,
+                       PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
+       buf = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL);
+       mii->mdio_write(mii->dev, mii->phy_id,
+                       PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
+
+       buf &= PHY_EXT_MODE_CTRL_MDIX_MASK_;
+       if (buf == PHY_EXT_MODE_CTRL_AUTO_MDIX_) {
+               cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
+               cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+       } else if (buf == PHY_EXT_MODE_CTRL_MDI_) {
+               cmd->eth_tp_mdix = ETH_TP_MDI;
+               cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
+       } else if (buf == PHY_EXT_MODE_CTRL_MDI_X_) {
+               cmd->eth_tp_mdix = ETH_TP_MDI_X;
+               cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
+       }
+
+       usb_autopm_put_interface(dev->intf);
+
+       return ret;
+}
+
+static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+       struct mii_if_info *mii = &dev->mii;
+       int ret = 0;
+       int temp;
+
+       if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
+               return -EOPNOTSUPP;
+
+       ret = usb_autopm_get_interface(dev->intf);
+       if (ret < 0)
+               return ret;
+
+       if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
+               if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI) {
+                       mii->mdio_write(mii->dev, mii->phy_id,
+                                       PHY_EXT_GPIO_PAGE,
+                                       PHY_EXT_GPIO_PAGE_SPACE_1);
+                       temp = mii->mdio_read(mii->dev, mii->phy_id,
+                                       PHY_EXT_MODE_CTRL);
+                       temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+                       mii->mdio_write(mii->dev, mii->phy_id,
+                                       PHY_EXT_MODE_CTRL,
+                                       temp | PHY_EXT_MODE_CTRL_MDI_);
+                       mii->mdio_write(mii->dev, mii->phy_id,
+                                       PHY_EXT_GPIO_PAGE,
+                                       PHY_EXT_GPIO_PAGE_SPACE_0);
+               } else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_X) {
+                       mii->mdio_write(mii->dev, mii->phy_id,
+                                       PHY_EXT_GPIO_PAGE,
+                                       PHY_EXT_GPIO_PAGE_SPACE_1);
+                       temp = mii->mdio_read(mii->dev, mii->phy_id,
+                                       PHY_EXT_MODE_CTRL);
+                       temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+                       mii->mdio_write(mii->dev, mii->phy_id,
+                                       PHY_EXT_MODE_CTRL,
+                                       temp | PHY_EXT_MODE_CTRL_MDI_X_);
+                       mii->mdio_write(mii->dev, mii->phy_id,
+                                       PHY_EXT_GPIO_PAGE,
+                                       PHY_EXT_GPIO_PAGE_SPACE_0);
+               } else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) {
+                       mii->mdio_write(mii->dev, mii->phy_id,
+                                       PHY_EXT_GPIO_PAGE,
+                                       PHY_EXT_GPIO_PAGE_SPACE_1);
+                       temp = mii->mdio_read(mii->dev, mii->phy_id,
+                                                       PHY_EXT_MODE_CTRL);
+                       temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+                       mii->mdio_write(mii->dev, mii->phy_id,
+                                       PHY_EXT_MODE_CTRL,
+                                       temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
+                       mii->mdio_write(mii->dev, mii->phy_id,
+                                       PHY_EXT_GPIO_PAGE,
+                                       PHY_EXT_GPIO_PAGE_SPACE_0);
+               }
+       }
+
+       /* change speed & duplex */
+       ret = mii_ethtool_sset(&dev->mii, cmd);
+
+       if (!cmd->autoneg) {
+               /* force link down */
+               temp = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
+               mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR,
+                               temp | BMCR_LOOPBACK);
+               mdelay(1);
+               mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, temp);
+       }
+
+       usb_autopm_put_interface(dev->intf);
+
+       return ret;
+}
+
+static const struct ethtool_ops lan78xx_ethtool_ops = {
+       .get_link       = lan78xx_get_link,
+       .nway_reset     = lan78xx_nway_reset,
+       .get_drvinfo    = lan78xx_get_drvinfo,
+       .get_msglevel   = lan78xx_get_msglevel,
+       .set_msglevel   = lan78xx_set_msglevel,
+       .get_settings   = lan78xx_get_settings,
+       .set_settings   = lan78xx_set_settings,
+       .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
+       .get_eeprom     = lan78xx_ethtool_get_eeprom,
+       .set_eeprom     = lan78xx_ethtool_set_eeprom,
+       .get_ethtool_stats = lan78xx_get_stats,
+       .get_sset_count = lan78xx_get_sset_count,
+       .get_strings    = lan78xx_get_strings,
+       .get_wol        = lan78xx_get_wol,
+       .set_wol        = lan78xx_set_wol,
+       .get_eee        = lan78xx_get_eee,
+       .set_eee        = lan78xx_set_eee,
+};
+
+static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+
+       if (!netif_running(netdev))
+               return -EINVAL;
+
+       return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+}
+
+static void lan78xx_init_mac_address(struct lan78xx_net *dev)
+{
+       u32 addr_lo, addr_hi;
+       int ret;
+       u8 addr[6];
+
+       ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
+       ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
+
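+       /* RX_ADDRL holds MAC bytes 0-3, RX_ADDRH bytes 4-5 (LSB first) */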
+       addr[0] = addr_lo & 0xFF;
+       addr[1] = (addr_lo >> 8) & 0xFF;
+       addr[2] = (addr_lo >> 16) & 0xFF;
+       addr[3] = (addr_lo >> 24) & 0xFF;
+       addr[4] = addr_hi & 0xFF;
+       addr[5] = (addr_hi >> 8) & 0xFF;
+
+       if (!is_valid_ether_addr(addr)) {
+               /* try reading the MAC address from EEPROM or OTP */
+               if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
+                                         addr) == 0) ||
+                    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
+                                      addr) == 0)) &&
+                   is_valid_ether_addr(addr)) {
+                       /* eeprom values are valid so use them */
+                       netif_dbg(dev, ifup, dev->net,
+                                 "MAC address read from EEPROM");
+               } else {
+                       /* generate a random MAC instead */
+                       random_ether_addr(addr);
+                       netif_dbg(dev, ifup, dev->net,
+                                 "MAC address set to random addr");
+               }
+
+               /* write back whichever address was chosen, so the
+                * MAF registers below stay consistent with it
+                */
+               addr_lo = addr[0] | (addr[1] << 8) |
+                         (addr[2] << 16) | (addr[3] << 24);
+               addr_hi = addr[4] | (addr[5] << 8);
+
+               ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+               ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+       }
+
+       ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+       ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+
+       ether_addr_copy(dev->net->dev_addr, addr);
+}
+
+static void lan78xx_mii_init(struct lan78xx_net *dev)
+{
+       /* Initialize MII structure */
+       dev->mii.dev = dev->net;
+       dev->mii.mdio_read = lan78xx_mdio_read;
+       dev->mii.mdio_write = lan78xx_mdio_write;
+       dev->mii.phy_id_mask = 0x1f;
+       dev->mii.reg_num_mask = 0x1f;
+       dev->mii.phy_id = INTERNAL_PHY_ID;
+       dev->mii.supports_gmii = true;
+}
+
+static int lan78xx_phy_init(struct lan78xx_net *dev)
+{
+       int temp;
+       struct mii_if_info *mii = &dev->mii;
+
+       if ((!mii->mdio_write) || (!mii->mdio_read))
+               return -EOPNOTSUPP;
+
+       temp = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
+       temp |= ADVERTISE_ALL;
+       mii->mdio_write(mii->dev, mii->phy_id, MII_ADVERTISE,
+                       temp | ADVERTISE_CSMA |
+                       ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+
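+       /* PHY_EXT_MODE_CTRL lives in extended register page 1, so switch
+        * pages around the read-modify-write and back afterwards
+        */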
+       /* set to AUTOMDIX */
+       mii->mdio_write(mii->dev, mii->phy_id,
+                       PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
+       temp = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL);
+       temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+       mii->mdio_write(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL,
+                       temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
+       mii->mdio_write(mii->dev, mii->phy_id,
+                       PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
+       dev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+       /* MAC doesn't support 1000HD */
+       temp = mii->mdio_read(mii->dev, mii->phy_id, MII_CTRL1000);
+       mii->mdio_write(mii->dev, mii->phy_id, MII_CTRL1000,
+                       temp & ~ADVERTISE_1000HALF);
+
+       /* clear interrupt */
+       mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS);
+       mii->mdio_write(mii->dev, mii->phy_id, PHY_VTSE_INT_MASK,
+                       PHY_VTSE_INT_MASK_MDINTPIN_EN_ |
+                       PHY_VTSE_INT_MASK_LINK_CHANGE_);
+
+       netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
+
+       return 0;
+}
+
+static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
+{
+       int ret = 0;
+       u32 buf;
+       bool rxenabled;
+
+       ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+
+       rxenabled = ((buf & MAC_RX_RXEN_) != 0);
+
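+       /* the receiver must be quiesced while the max frame size changes */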
+       if (rxenabled) {
+               buf &= ~MAC_RX_RXEN_;
+               ret = lan78xx_write_reg(dev, MAC_RX, buf);
+       }
+
+       /* add 4 to size for FCS */
+       buf &= ~MAC_RX_MAX_SIZE_MASK_;
+       buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
+
+       ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+       if (rxenabled) {
+               buf |= MAC_RX_RXEN_;
+               ret = lan78xx_write_reg(dev, MAC_RX, buf);
+       }
+
+       return 0;
+}
+
+static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
+{
+       struct sk_buff *skb;
+       unsigned long flags;
+       int count = 0;
+
+       spin_lock_irqsave(&q->lock, flags);
+       while (!skb_queue_empty(q)) {
+               struct skb_data *entry;
+               struct urb *urb;
+               int ret;
+
+               skb_queue_walk(q, skb) {
+                       entry = (struct skb_data *)skb->cb;
+                       if (entry->state != unlink_start)
+                               goto found;
+               }
+               break;
+found:
+               entry->state = unlink_start;
+               urb = entry->urb;
+
+               /* take a reference on the URB so it cannot be freed while
+                * usb_unlink_urb() runs; usb_unlink_urb() always races with
+                * the .complete handler (including defer_bh), so dropping
+                * the reference early could trigger a use-after-free.
+                */
+               usb_get_urb(urb);
+               spin_unlock_irqrestore(&q->lock, flags);
+               /* during some PM-driven resume scenarios,
+                * these (async) unlinks complete immediately
+                */
+               ret = usb_unlink_urb(urb);
+               if (ret != -EINPROGRESS && ret != 0)
+                       netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
+               else
+                       count++;
+               usb_put_urb(urb);
+               spin_lock_irqsave(&q->lock, flags);
+       }
+       spin_unlock_irqrestore(&q->lock, flags);
+       return count;
+}
+
+static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       int ll_mtu = new_mtu + netdev->hard_header_len;
+       int old_hard_mtu = dev->hard_mtu;
+       int old_rx_urb_size = dev->rx_urb_size;
+       int ret;
+
+       if (new_mtu > MAX_SINGLE_PACKET_SIZE)
+               return -EINVAL;
+
+       if (new_mtu <= 0)
+               return -EINVAL;
+       /* no second zero-length packet read wanted after mtu-sized packets */
+       if ((ll_mtu % dev->maxpacket) == 0)
+               return -EDOM;
+
+       ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
+
+       netdev->mtu = new_mtu;
+
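+       /* if rx_urb_size was tracking the old hard_mtu, grow it along with
+        * the new one; in-flight rx urbs are then unlinked so they get
+        * resubmitted at the larger size
+        */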
+       dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
+       if (dev->rx_urb_size == old_hard_mtu) {
+               dev->rx_urb_size = dev->hard_mtu;
+               if (dev->rx_urb_size > old_rx_urb_size) {
+                       if (netif_running(dev->net)) {
+                               unlink_urbs(dev, &dev->rxq);
+                               tasklet_schedule(&dev->bh);
+                       }
+               }
+       }
+
+       return 0;
+}
+
+int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       struct sockaddr *addr = p;
+       u32 addr_lo, addr_hi;
+       int ret;
+
+       if (netif_running(netdev))
+               return -EBUSY;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       ether_addr_copy(netdev->dev_addr, addr->sa_data);
+
+       addr_lo = netdev->dev_addr[0] |
+                 netdev->dev_addr[1] << 8 |
+                 netdev->dev_addr[2] << 16 |
+                 netdev->dev_addr[3] << 24;
+       addr_hi = netdev->dev_addr[4] |
+                 netdev->dev_addr[5] << 8;
+
+       ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+       ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+
+       return 0;
+}
+
+/* Enable or disable Rx checksum offload engine */
+static int lan78xx_set_features(struct net_device *netdev,
+                               netdev_features_t features)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
+
+       if (features & NETIF_F_RXCSUM) {
+               pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
+               pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
+       } else {
+               pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
+               pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
+       }
+
+       if (features & NETIF_F_HW_VLAN_CTAG_RX)
+               pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
+       else
+               pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
+
+       spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
+
+       ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+
+       return 0;
+}
+
+static void lan78xx_deferred_vlan_write(struct work_struct *param)
+{
+       struct lan78xx_priv *pdata =
+                       container_of(param, struct lan78xx_priv, set_vlan);
+       struct lan78xx_net *dev = pdata->dev;
+
+       lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
+                              DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
+}
+
+static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
+                                  __be16 proto, u16 vid)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+       u16 vid_bit_index;
+       u16 vid_dword_index;
+
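+       /* the 4096 possible VIDs map onto 128 32-bit words: bits 11:5 of
+        * the vid select the dword, bits 4:0 the bit within it (e.g. vid
+        * 1234 -> dword 38, bit 18)
+        */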
+       vid_dword_index = (vid >> 5) & 0x7F;
+       vid_bit_index = vid & 0x1F;
+
+       pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
+
+       /* defer register writes to a sleepable context */
+       schedule_work(&pdata->set_vlan);
+
+       return 0;
+}
+
+static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
+                                   __be16 proto, u16 vid)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+       u16 vid_bit_index;
+       u16 vid_dword_index;
+
+       vid_dword_index = (vid >> 5) & 0x7F;
+       vid_bit_index = vid & 0x1F;
+
+       pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
+
+       /* defer register writes to a sleepable context */
+       schedule_work(&pdata->set_vlan);
+
+       return 0;
+}
+
+static void lan78xx_init_ltm(struct lan78xx_net *dev)
+{
+       int ret;
+       u32 buf;
+       u32 regs[6] = { 0 };
+
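+       /* LTM = USB3 Latency Tolerance Messaging; the six BELT/idle
+        * register values may be provisioned as a 24-byte block in EEPROM
+        * or OTP, otherwise they stay zero
+        */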
+       ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+       if (buf & USB_CFG1_LTM_ENABLE_) {
+               u8 temp[2];
+               /* Get values from EEPROM first */
+               if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
+                       if (temp[0] == 24) {
+                               ret = lan78xx_read_raw_eeprom(dev,
+                                                             temp[1] * 2,
+                                                             24,
+                                                             (u8 *)regs);
+                               if (ret < 0)
+                                       return;
+                       }
+               } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
+                       if (temp[0] == 24) {
+                               ret = lan78xx_read_raw_otp(dev,
+                                                          temp[1] * 2,
+                                                          24,
+                                                          (u8 *)regs);
+                               if (ret < 0)
+                                       return;
+                       }
+               }
+       }
+
+       lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
+       lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
+       lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
+       lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
+       lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
+       lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
+}
+
+static int lan78xx_reset(struct lan78xx_net *dev)
+{
+       struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+       u32 buf;
+       int ret = 0;
+       unsigned long timeout;
+
+       ret = lan78xx_read_reg(dev, HW_CFG, &buf);
+       buf |= HW_CFG_LRST_;
+       ret = lan78xx_write_reg(dev, HW_CFG, buf);
+
+       timeout = jiffies + HZ;
+       do {
+               mdelay(1);
+               ret = lan78xx_read_reg(dev, HW_CFG, &buf);
+               if (time_after(jiffies, timeout)) {
+                       netdev_warn(dev->net,
+                                   "timeout on completion of LiteReset");
+                       return -EIO;
+               }
+       } while (buf & HW_CFG_LRST_);
+
+       lan78xx_init_mac_address(dev);
+
+       /* Respond to the IN token with a NAK */
+       ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
+       buf |= USB_CFG_BIR_;
+       ret = lan78xx_write_reg(dev, USB_CFG0, buf);
+
+       /* Init LTM */
+       lan78xx_init_ltm(dev);
+
+       dev->net->hard_header_len += TX_OVERHEAD;
+       dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
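+       /* BURST_CAP is programmed in units of the bulk-in packet size for
+        * the negotiated USB speed
+        */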
+       if (dev->udev->speed == USB_SPEED_SUPER) {
+               buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
+               dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
+               dev->rx_qlen = 4;
+               dev->tx_qlen = 4;
+       } else if (dev->udev->speed == USB_SPEED_HIGH) {
+               buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
+               dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
+               dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
+               dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
+       } else {
+               buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
+               dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
+               dev->rx_qlen = 4;
+       }
+
+       ret = lan78xx_write_reg(dev, BURST_CAP, buf);
+       ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
+
+       ret = lan78xx_read_reg(dev, HW_CFG, &buf);
+       buf |= HW_CFG_MEF_;
+       ret = lan78xx_write_reg(dev, HW_CFG, buf);
+
+       ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
+       buf |= USB_CFG_BCE_;
+       ret = lan78xx_write_reg(dev, USB_CFG0, buf);
+
+       /* set FIFO sizes */
+       buf = (MAX_RX_FIFO_SIZE - 512) / 512;
+       ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
+
+       buf = (MAX_TX_FIFO_SIZE - 512) / 512;
+       ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
+
+       ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
+       ret = lan78xx_write_reg(dev, FLOW, 0);
+       ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
+
+       /* Don't need rfe_ctl_lock during initialisation */
+       ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
+       pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
+       ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+
+       /* Enable or disable checksum offload engines */
+       lan78xx_set_features(dev->net, dev->net->features);
+
+       lan78xx_set_multicast(dev->net);
+
+       /* reset PHY */
+       ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+       buf |= PMT_CTL_PHY_RST_;
+       ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+       timeout = jiffies + HZ;
+       do {
+               mdelay(1);
+               ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+               if (time_after(jiffies, timeout)) {
+                       netdev_warn(dev->net, "timeout waiting for PHY Reset");
+                       return -EIO;
+               }
+       } while (buf & PMT_CTL_PHY_RST_);
+
+       lan78xx_mii_init(dev);
+
+       ret = lan78xx_phy_init(dev);
+
+       ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+
+       buf |= MAC_CR_GMII_EN_;
+       buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
+
+       ret = lan78xx_write_reg(dev, MAC_CR, buf);
+
+       /* if EEE is enabled in the MAC, advertise it on the PHY
+        * (MMD 7 register 0x3C = EEE advertisement)
+        */
+       if (buf & MAC_CR_EEE_EN_)
+               lan78xx_mmd_write(dev->net, dev->mii.phy_id, 0x07, 0x3C, 0x06);
+
+       /* enable PHY interrupts */
+       ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+       buf |= INT_ENP_PHY_INT;
+       ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);
+
+       ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+       buf |= MAC_TX_TXEN_;
+       ret = lan78xx_write_reg(dev, MAC_TX, buf);
+
+       ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
+       buf |= FCT_TX_CTL_EN_;
+       ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
+
+       ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
+
+       ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+       buf |= MAC_RX_RXEN_;
+       ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+       ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
+       buf |= FCT_RX_CTL_EN_;
+       ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
+
+       if (!mii_nway_restart(&dev->mii))
+               netif_dbg(dev, link, dev->net, "autoneg initiated");
+
+       return 0;
+}
+
+static int lan78xx_open(struct net_device *net)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+       int ret;
+
+       ret = usb_autopm_get_interface(dev->intf);
+       if (ret < 0)
+               goto out;
+
+       ret = lan78xx_reset(dev);
+       if (ret < 0)
+               goto done;
+
+       /* for Link Check */
+       if (dev->urb_intr) {
+               ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
+               if (ret < 0) {
+                       netif_err(dev, ifup, dev->net,
+                                 "intr submit %d\n", ret);
+                       goto done;
+               }
+       }
+
+       set_bit(EVENT_DEV_OPEN, &dev->flags);
+
+       netif_start_queue(net);
+
+       dev->link_on = false;
+
+       lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
+done:
+       usb_autopm_put_interface(dev->intf);
+
+out:
+       return ret;
+}
+
+static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
+{
+       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
+       DECLARE_WAITQUEUE(wait, current);
+       int temp;
+
+       /* ensure there are no more active urbs */
+       add_wait_queue(&unlink_wakeup, &wait);
+       set_current_state(TASK_UNINTERRUPTIBLE);
+       dev->wait = &unlink_wakeup;
+       temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
+
+       /* maybe wait for deletions to finish. */
+       while (!skb_queue_empty(&dev->rxq) &&
+              !skb_queue_empty(&dev->txq) &&
+              !skb_queue_empty(&dev->done)) {
+               schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               netif_dbg(dev, ifdown, dev->net,
+                         "waited for %d urb completions\n", temp);
+       }
+       set_current_state(TASK_RUNNING);
+       dev->wait = NULL;
+       remove_wait_queue(&unlink_wakeup, &wait);
+}
+
+int lan78xx_stop(struct net_device *net)
+{
+       struct lan78xx_net              *dev = netdev_priv(net);
+
+       clear_bit(EVENT_DEV_OPEN, &dev->flags);
+       netif_stop_queue(net);
+
+       netif_info(dev, ifdown, dev->net,
+                  "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
+                  net->stats.rx_packets, net->stats.tx_packets,
+                  net->stats.rx_errors, net->stats.tx_errors);
+
+       lan78xx_terminate_urbs(dev);
+
+       usb_kill_urb(dev->urb_intr);
+
+       skb_queue_purge(&dev->rxq_pause);
+
+       /* deferred work (task, timer, softirq) must also stop.
+        * can't flush_scheduled_work() until we drop rtnl (later),
+        * else workers could deadlock; so make workers a NOP.
+        */
+       dev->flags = 0;
+       cancel_delayed_work_sync(&dev->wq);
+       tasklet_kill(&dev->bh);
+
+       usb_autopm_put_interface(dev->intf);
+
+       return 0;
+}
+
+static int lan78xx_linearize(struct sk_buff *skb)
+{
+       return skb_linearize(skb);
+}
+
+static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
+                                      struct sk_buff *skb, gfp_t flags)
+{
+       u32 tx_cmd_a, tx_cmd_b;
+
+       if (skb_headroom(skb) < TX_OVERHEAD) {
+               struct sk_buff *skb2;
+
+               skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
+               dev_kfree_skb_any(skb);
+               skb = skb2;
+               if (!skb)
+                       return NULL;
+       }
+
+       if (lan78xx_linearize(skb) < 0)
+               return NULL;
+
+       tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
+
+       tx_cmd_b = 0;
+       if (skb_is_gso(skb)) {
+               u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
+
+               tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
+
+               tx_cmd_a |= TX_CMD_A_LSO_;
+       }
+
+       if (skb_vlan_tag_present(skb)) {
+               tx_cmd_a |= TX_CMD_A_IVTG_;
+               tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
+       }
+
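+       /* prepend the two little-endian command words; TX_CMD_B is pushed
+        * first so that TX_CMD_A ends up at the start of the buffer
+        */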
+       skb_push(skb, 4);
+       cpu_to_le32s(&tx_cmd_b);
+       memcpy(skb->data, &tx_cmd_b, 4);
+
+       skb_push(skb, 4);
+       cpu_to_le32s(&tx_cmd_a);
+       memcpy(skb->data, &tx_cmd_a, 4);
+
+       return skb;
+}
+
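+/* move the skb from its active queue onto dev->done under both queue
+ * locks, recording its new state, and kick the bottom half when the done
+ * list becomes non-empty; returns the previous state so callers can spot
+ * a racing unlink
+ */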
+static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
+                              struct sk_buff_head *list, enum skb_state state)
+{
+       unsigned long flags;
+       enum skb_state old_state;
+       struct skb_data *entry = (struct skb_data *)skb->cb;
+
+       spin_lock_irqsave(&list->lock, flags);
+       old_state = entry->state;
+       entry->state = state;
+       BUG_ON(!list->prev);
+       BUG_ON(!list->next);
+       BUG_ON(!skb->prev || !skb->next);
+
+       __skb_unlink(skb, list);
+       spin_unlock(&list->lock);
+       spin_lock(&dev->done.lock);
+       BUG_ON(!dev->done.prev);
+       BUG_ON(!dev->done.next);
+
+       __skb_queue_tail(&dev->done, skb);
+       if (skb_queue_len(&dev->done) == 1)
+               tasklet_schedule(&dev->bh);
+       spin_unlock_irqrestore(&dev->done.lock, flags);
+
+       return old_state;
+}
+
+static void tx_complete(struct urb *urb)
+{
+       struct sk_buff *skb = (struct sk_buff *)urb->context;
+       struct skb_data *entry = (struct skb_data *)skb->cb;
+       struct lan78xx_net *dev = entry->dev;
+
+       if (urb->status == 0) {
+               dev->net->stats.tx_packets++;
+               dev->net->stats.tx_bytes += entry->length;
+       } else {
+               dev->net->stats.tx_errors++;
+
+               switch (urb->status) {
+               case -EPIPE:
+                       lan78xx_defer_kevent(dev, EVENT_TX_HALT);
+                       break;
+
+               /* software-driven interface shutdown */
+               case -ECONNRESET:
+               case -ESHUTDOWN:
+                       break;
+
+               case -EPROTO:
+               case -ETIME:
+               case -EILSEQ:
+                       netif_stop_queue(dev->net);
+                       break;
+               default:
+                       netif_dbg(dev, tx_err, dev->net,
+                                 "tx err %d\n", entry->urb->status);
+                       break;
+               }
+       }
+
+       usb_autopm_put_interface_async(dev->intf);
+
+       if (skb)
+               defer_bh(dev, skb, &dev->txq, tx_done);
+}
+
+static void lan78xx_queue_skb(struct sk_buff_head *list,
+                             struct sk_buff *newsk, enum skb_state state)
+{
+       struct skb_data *entry = (struct skb_data *)newsk->cb;
+
+       __skb_queue_tail(list, newsk);
+       entry->state = state;
+}
+
+netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+
+       if (skb)
+               skb_tx_timestamp(skb);
+
+       skb = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
+       if (skb) {
+               skb_queue_tail(&dev->txq_pend, skb);
+
+               if (skb_queue_len(&dev->txq_pend) > 10)
+                       netif_stop_queue(net);
+       } else {
+               netif_dbg(dev, tx_err, dev->net,
+                         "lan78xx_tx_prep return NULL\n");
+               dev->net->stats.tx_errors++;
+               dev->net->stats.tx_dropped++;
+       }
+
+       tasklet_schedule(&dev->bh);
+
+       return NETDEV_TX_OK;
+}
+
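+/* scan the interface altsettings for the first one offering a bulk-in /
+ * bulk-out pair; an interrupt-in endpoint, if present, is kept for link
+ * status notifications
+ */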
+int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
+{
+       int tmp;
+       struct usb_host_interface *alt = NULL;
+       struct usb_host_endpoint *in = NULL, *out = NULL;
+       struct usb_host_endpoint *status = NULL;
+
+       for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
+               unsigned ep;
+
+               in = NULL;
+               out = NULL;
+               status = NULL;
+               alt = intf->altsetting + tmp;
+
+               for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
+                       struct usb_host_endpoint *e;
+                       int intr = 0;
+
+                       e = alt->endpoint + ep;
+                       switch (e->desc.bmAttributes) {
+                       case USB_ENDPOINT_XFER_INT:
+                               if (!usb_endpoint_dir_in(&e->desc))
+                                       continue;
+                               intr = 1;
+                               /* FALLTHROUGH */
+                       case USB_ENDPOINT_XFER_BULK:
+                               break;
+                       default:
+                               continue;
+                       }
+                       if (usb_endpoint_dir_in(&e->desc)) {
+                               if (!intr && !in)
+                                       in = e;
+                               else if (intr && !status)
+                                       status = e;
+                       } else {
+                               if (!out)
+                                       out = e;
+                       }
+               }
+               if (in && out)
+                       break;
+       }
+       if (!alt || !in || !out)
+               return -EINVAL;
+
+       dev->pipe_in = usb_rcvbulkpipe(dev->udev,
+                                      in->desc.bEndpointAddress &
+                                      USB_ENDPOINT_NUMBER_MASK);
+       dev->pipe_out = usb_sndbulkpipe(dev->udev,
+                                       out->desc.bEndpointAddress &
+                                       USB_ENDPOINT_NUMBER_MASK);
+       dev->ep_intr = status;
+
+       return 0;
+}
+
+static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
+{
+       struct lan78xx_priv *pdata = NULL;
+       int ret;
+       int i;
+
+       ret = lan78xx_get_endpoints(dev, intf);
+
+       dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
+
+       pdata = (struct lan78xx_priv *)(dev->data[0]);
+       if (!pdata) {
+               netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
+               return -ENOMEM;
+       }
+
+       pdata->dev = dev;
+
+       spin_lock_init(&pdata->rfe_ctl_lock);
+       mutex_init(&pdata->dataport_mutex);
+
+       INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
+
+       for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
+               pdata->vlan_table[i] = 0;
+
+       INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
+
+       dev->net->features = 0;
+
+       if (DEFAULT_TX_CSUM_ENABLE)
+               dev->net->features |= NETIF_F_HW_CSUM;
+
+       if (DEFAULT_RX_CSUM_ENABLE)
+               dev->net->features |= NETIF_F_RXCSUM;
+
+       if (DEFAULT_TSO_CSUM_ENABLE)
+               dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
+
+       dev->net->hw_features = dev->net->features;
+
+       /* Init all registers */
+       ret = lan78xx_reset(dev);
+
+       dev->net->flags |= IFF_MULTICAST;
+
+       pdata->wol = WAKE_MAGIC;
+
+       return 0;
+}
+
+static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
+{
+       struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+
+       if (pdata) {
+               netif_dbg(dev, ifdown, dev->net, "free pdata");
+               kfree(pdata);
+               pdata = NULL;
+               dev->data[0] = 0;
+       }
+}
+
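+/* pass the hardware-computed checksum from rx_cmd_b to the stack as
+ * CHECKSUM_COMPLETE, unless RX checksum offload is disabled or the
+ * RX_CMD_A_ICSM_ flag marks the value as unusable
+ */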
+static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
+                                   struct sk_buff *skb,
+                                   u32 rx_cmd_a, u32 rx_cmd_b)
+{
+       if (!(dev->net->features & NETIF_F_RXCSUM) ||
+           unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
+               skb->ip_summed = CHECKSUM_NONE;
+       } else {
+               skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
+               skb->ip_summed = CHECKSUM_COMPLETE;
+       }
+}
+
+void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
+{
+       int             status;
+
+       if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
+               skb_queue_tail(&dev->rxq_pause, skb);
+               return;
+       }
+
+       skb->protocol = eth_type_trans(skb, dev->net);
+       dev->net->stats.rx_packets++;
+       dev->net->stats.rx_bytes += skb->len;
+
+       netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
+                 skb->len + sizeof(struct ethhdr), skb->protocol);
+       memset(skb->cb, 0, sizeof(struct skb_data));
+
+       if (skb_defer_rx_timestamp(skb))
+               return;
+
+       status = netif_rx(skb);
+       if (status != NET_RX_SUCCESS)
+               netif_dbg(dev, rx_err, dev->net,
+                         "netif_rx status %d\n", status);
+}
+
+static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
+{
+       if (skb->len < dev->net->hard_header_len)
+               return 0;
+
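+       /* a bulk-in buffer may carry several frames, each prefixed by
+        * RX_CMD_A/RX_CMD_B (32 bits each) and RX_CMD_C (16 bits) and
+        * padded so that the next header stays 4-byte aligned
+        */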
+       while (skb->len > 0) {
+               u32 rx_cmd_a, rx_cmd_b, align_count, size;
+               u16 rx_cmd_c;
+               struct sk_buff *skb2;
+               unsigned char *packet;
+
+               memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
+               le32_to_cpus(&rx_cmd_a);
+               skb_pull(skb, sizeof(rx_cmd_a));
+
+               memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
+               le32_to_cpus(&rx_cmd_b);
+               skb_pull(skb, sizeof(rx_cmd_b));
+
+               memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
+               le16_to_cpus(&rx_cmd_c);
+               skb_pull(skb, sizeof(rx_cmd_c));
+
+               packet = skb->data;
+
+               /* get the packet length */
+               size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
+               align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
+
+               if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
+                       netif_dbg(dev, rx_err, dev->net,
+                                 "Error rx_cmd_a=0x%08x", rx_cmd_a);
+               } else {
+                       /* last frame in this batch */
+                       if (skb->len == size) {
+                               lan78xx_rx_csum_offload(dev, skb,
+                                                       rx_cmd_a, rx_cmd_b);
+
+                               skb_trim(skb, skb->len - 4); /* remove fcs */
+                               skb->truesize = size + sizeof(struct sk_buff);
+
+                               return 1;
+                       }
+
+                       skb2 = skb_clone(skb, GFP_ATOMIC);
+                       if (unlikely(!skb2)) {
+                               netdev_warn(dev->net, "Error allocating skb");
+                               return 0;
+                       }
+
+                       skb2->len = size;
+                       skb2->data = packet;
+                       skb_set_tail_pointer(skb2, size);
+
+                       lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
+
+                       skb_trim(skb2, skb2->len - 4); /* remove fcs */
+                       skb2->truesize = size + sizeof(struct sk_buff);
+
+                       lan78xx_skb_return(dev, skb2);
+               }
+
+               skb_pull(skb, size);
+
+               /* padding bytes before the next frame starts */
+               if (skb->len)
+                       skb_pull(skb, align_count);
+       }
+
+       if (unlikely(skb->len < 0)) {
+               netdev_warn(dev->net, "invalid rx length<0 %d", skb->len);
+               return 0;
+       }
+
+       return 1;
+}
+
+static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
+{
+       if (!lan78xx_rx(dev, skb)) {
+               dev->net->stats.rx_errors++;
+               goto done;
+       }
+
+       if (skb->len) {
+               lan78xx_skb_return(dev, skb);
+               return;
+       }
+
+       netif_dbg(dev, rx_err, dev->net, "drop\n");
+       dev->net->stats.rx_errors++;
+done:
+       skb_queue_tail(&dev->done, skb);
+}
+
+static void rx_complete(struct urb *urb);
+
+static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
+{
+       struct sk_buff *skb;
+       struct skb_data *entry;
+       unsigned long lockflags;
+       size_t size = dev->rx_urb_size;
+       int ret = 0;
+
+       skb = netdev_alloc_skb_ip_align(dev->net, size);
+       if (!skb) {
+               usb_free_urb(urb);
+               return -ENOMEM;
+       }
+
+       entry = (struct skb_data *)skb->cb;
+       entry->urb = urb;
+       entry->dev = dev;
+       entry->length = 0;
+
+       usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
+                         skb->data, size, rx_complete, skb);
+
+       spin_lock_irqsave(&dev->rxq.lock, lockflags);
+
+       if (netif_device_present(dev->net) &&
+           netif_running(dev->net) &&
+           !test_bit(EVENT_RX_HALT, &dev->flags) &&
+           !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+               ret = usb_submit_urb(urb, GFP_ATOMIC);
+               switch (ret) {
+               case 0:
+                       lan78xx_queue_skb(&dev->rxq, skb, rx_start);
+                       break;
+               case -EPIPE:
+                       lan78xx_defer_kevent(dev, EVENT_RX_HALT);
+                       break;
+               case -ENODEV:
+                       netif_dbg(dev, ifdown, dev->net, "device gone\n");
+                       netif_device_detach(dev->net);
+                       break;
+               case -EHOSTUNREACH:
+                       ret = -ENOLINK;
+                       break;
+               default:
+                       netif_dbg(dev, rx_err, dev->net,
+                                 "rx submit, %d\n", ret);
+                       tasklet_schedule(&dev->bh);
+               }
+       } else {
+               netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
+               ret = -ENOLINK;
+       }
+       spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
+       if (ret) {
+               dev_kfree_skb_any(skb);
+               usb_free_urb(urb);
+       }
+       return ret;
+}
+
+static void rx_complete(struct urb *urb)
+{
+       struct sk_buff  *skb = (struct sk_buff *)urb->context;
+       struct skb_data *entry = (struct skb_data *)skb->cb;
+       struct lan78xx_net *dev = entry->dev;
+       int urb_status = urb->status;
+       enum skb_state state;
+
+       skb_put(skb, urb->actual_length);
+       state = rx_done;
+       entry->urb = NULL;
+
+       switch (urb_status) {
+       case 0:
+               if (skb->len < dev->net->hard_header_len) {
+                       state = rx_cleanup;
+                       dev->net->stats.rx_errors++;
+                       dev->net->stats.rx_length_errors++;
+                       netif_dbg(dev, rx_err, dev->net,
+                                 "rx length %d\n", skb->len);
+               }
+               usb_mark_last_busy(dev->udev);
+               break;
+       case -EPIPE:
+               dev->net->stats.rx_errors++;
+               lan78xx_defer_kevent(dev, EVENT_RX_HALT);
+               /* FALLTHROUGH */
+       case -ECONNRESET:                               /* async unlink */
+       case -ESHUTDOWN:                                /* hardware gone */
+               netif_dbg(dev, ifdown, dev->net,
+                         "rx shutdown, code %d\n", urb_status);
+               state = rx_cleanup;
+               entry->urb = urb;
+               urb = NULL;
+               break;
+       case -EPROTO:
+       case -ETIME:
+       case -EILSEQ:
+               dev->net->stats.rx_errors++;
+               state = rx_cleanup;
+               entry->urb = urb;
+               urb = NULL;
+               break;
+
+       /* data overrun ... flush fifo? */
+       case -EOVERFLOW:
+               dev->net->stats.rx_over_errors++;
+               /* FALLTHROUGH */
+
+       default:
+               state = rx_cleanup;
+               dev->net->stats.rx_errors++;
+               netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
+               break;
+       }
+
+       state = defer_bh(dev, skb, &dev->rxq, state);
+
+       if (urb) {
+               if (netif_running(dev->net) &&
+                   !test_bit(EVENT_RX_HALT, &dev->flags) &&
+                   state != unlink_start) {
+                       rx_submit(dev, urb, GFP_ATOMIC);
+                       return;
+               }
+               usb_free_urb(urb);
+       }
+       netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
+}
+
+static void lan78xx_tx_bh(struct lan78xx_net *dev)
+{
+       int length;
+       struct urb *urb = NULL;
+       struct skb_data *entry;
+       unsigned long flags;
+       struct sk_buff_head *tqp = &dev->txq_pend;
+       struct sk_buff *skb, *skb2;
+       int ret;
+       int count, pos;
+       int skb_totallen, pkt_cnt;
+
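+       /* coalesce pending skbs (already carrying their command words from
+        * lan78xx_tx_prep) into one bulk-out urb, each padded to a 4-byte
+        * boundary; a GSO skb is always transmitted on its own
+        */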
+       skb_totallen = 0;
+       pkt_cnt = 0;
+       for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
+               if (skb_is_gso(skb)) {
+                       if (pkt_cnt) {
+                               /* handle previous packets first */
+                               break;
+                       }
+                       length = skb->len;
+                       skb2 = skb_dequeue(tqp);
+                       goto gso_skb;
+               }
+
+               if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
+                       break;
+               skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
+               pkt_cnt++;
+       }
+
+       /* copy to a single skb */
+       skb = alloc_skb(skb_totallen, GFP_ATOMIC);
+       if (!skb)
+               goto drop;
+
+       skb_put(skb, skb_totallen);
+
+       for (count = pos = 0; count < pkt_cnt; count++) {
+               skb2 = skb_dequeue(tqp);
+               if (skb2) {
+                       memcpy(skb->data + pos, skb2->data, skb2->len);
+                       pos += roundup(skb2->len, sizeof(u32));
+                       dev_kfree_skb(skb2);
+               } else {
+                       BUG();
+               }
+       }
+
+       length = skb_totallen;
+
+gso_skb:
+       urb = usb_alloc_urb(0, GFP_ATOMIC);
+       if (!urb) {
+               netif_dbg(dev, tx_err, dev->net, "no urb\n");
+               goto drop;
+       }
+
+       entry = (struct skb_data *)skb->cb;
+       entry->urb = urb;
+       entry->dev = dev;
+       entry->length = length;
+
+       spin_lock_irqsave(&dev->txq.lock, flags);
+       ret = usb_autopm_get_interface_async(dev->intf);
+       if (ret < 0) {
+               spin_unlock_irqrestore(&dev->txq.lock, flags);
+               goto drop;
+       }
+
+       usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
+                         skb->data, skb->len, tx_complete, skb);
+
+       if (length % dev->maxpacket == 0) {
+               /* transfer ended on a packet boundary: request a ZLP */
+               urb->transfer_flags |= URB_ZERO_PACKET;
+       }
+
+#ifdef CONFIG_PM
+       /* if this triggers, the device is still asleep */
+       if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+               /* transmission will be done on resume */
+               usb_anchor_urb(urb, &dev->deferred);
+               /* no point in processing more packets */
+               netif_stop_queue(dev->net);
+               usb_put_urb(urb);
+               spin_unlock_irqrestore(&dev->txq.lock, flags);
+               netdev_dbg(dev->net, "Delaying transmission for resumption\n");
+               return;
+       }
+#endif
+
+       ret = usb_submit_urb(urb, GFP_ATOMIC);
+       switch (ret) {
+       case 0:
+               dev->net->trans_start = jiffies;
+               lan78xx_queue_skb(&dev->txq, skb, tx_start);
+               if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
+                       netif_stop_queue(dev->net);
+               break;
+       case -EPIPE:
+               netif_stop_queue(dev->net);
+               lan78xx_defer_kevent(dev, EVENT_TX_HALT);
+               usb_autopm_put_interface_async(dev->intf);
+               break;
+       default:
+               usb_autopm_put_interface_async(dev->intf);
+               netif_dbg(dev, tx_err, dev->net,
+                         "tx: submit urb err %d\n", ret);
+               break;
+       }
+
+       spin_unlock_irqrestore(&dev->txq.lock, flags);
+
+       if (ret) {
+               netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
+drop:
+               dev->net->stats.tx_dropped++;
+               if (skb)
+                       dev_kfree_skb_any(skb);
+               usb_free_urb(urb);
+       } else
+               netif_dbg(dev, tx_queued, dev->net,
+                         "> tx, len %d, type 0x%x\n", length, skb->protocol);
+}
+
+static void lan78xx_rx_bh(struct lan78xx_net *dev)
+{
+       struct urb *urb;
+       int i;
+
+       if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
+               for (i = 0; i < 10; i++) {
+                       if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
+                               break;
+                       urb = usb_alloc_urb(0, GFP_ATOMIC);
+                       if (urb)
+                               if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
+                                       return;
+               }
+
+               if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
+                       tasklet_schedule(&dev->bh);
+       }
+       if (skb_queue_len(&dev->txq) < dev->tx_qlen)
+               netif_wake_queue(dev->net);
+}
+
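+/* tasklet bottom half: drain dev->done (recycling tx urbs, handing
+ * completed rx buffers to rx_process), then restart tx batching and rx
+ * refill while the interface is up
+ */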
+static void lan78xx_bh(unsigned long param)
+{
+       struct lan78xx_net *dev = (struct lan78xx_net *)param;
+       struct sk_buff *skb;
+       struct skb_data *entry;
+
+       BUG_ON(!dev->done.prev);
+       BUG_ON(!dev->done.next);
+
+       while ((skb = skb_dequeue(&dev->done))) {
+               entry = (struct skb_data *)(skb->cb);
+               switch (entry->state) {
+               case rx_done:
+                       entry->state = rx_cleanup;
+                       rx_process(dev, skb);
+                       continue;
+               case tx_done:
+                       usb_free_urb(entry->urb);
+                       dev_kfree_skb(skb);
+                       continue;
+               case rx_cleanup:
+                       usb_free_urb(entry->urb);
+                       dev_kfree_skb(skb);
+                       continue;
+               default:
+                       netdev_dbg(dev->net, "skb state %d\n", entry->state);
+                       return;
+               }
+               BUG_ON(!dev->done.prev);
+               BUG_ON(!dev->done.next);
+       }
+
+       if (netif_device_present(dev->net) && netif_running(dev->net)) {
+               if (!skb_queue_empty(&dev->txq_pend))
+                       lan78xx_tx_bh(dev);
+
+               if (!timer_pending(&dev->delay) &&
+                   !test_bit(EVENT_RX_HALT, &dev->flags))
+                       lan78xx_rx_bh(dev);
+       }
+}
+
+static void lan78xx_delayedwork(struct work_struct *work)
+{
+       int status;
+       struct lan78xx_net *dev;
+
+       dev = container_of(work, struct lan78xx_net, wq.work);
+
+       if (test_bit(EVENT_TX_HALT, &dev->flags)) {
+               unlink_urbs(dev, &dev->txq);
+               status = usb_autopm_get_interface(dev->intf);
+               if (status < 0)
+                       goto fail_pipe;
+               status = usb_clear_halt(dev->udev, dev->pipe_out);
+               usb_autopm_put_interface(dev->intf);
+               if (status < 0 &&
+                   status != -EPIPE &&
+                   status != -ESHUTDOWN) {
+                       if (netif_msg_tx_err(dev))
+fail_pipe:
+                               netdev_err(dev->net,
+                                          "can't clear tx halt, status %d\n",
+                                          status);
+               } else {
+                       clear_bit(EVENT_TX_HALT, &dev->flags);
+                       if (status != -ESHUTDOWN)
+                               netif_wake_queue(dev->net);
+               }
+       }
+       if (test_bit(EVENT_RX_HALT, &dev->flags)) {
+               unlink_urbs(dev, &dev->rxq);
+               status = usb_autopm_get_interface(dev->intf);
+               if (status < 0)
+                       goto fail_halt;
+               status = usb_clear_halt(dev->udev, dev->pipe_in);
+               usb_autopm_put_interface(dev->intf);
+               if (status < 0 &&
+                   status != -EPIPE &&
+                   status != -ESHUTDOWN) {
+                       if (netif_msg_rx_err(dev))
+fail_halt:
+                               netdev_err(dev->net,
+                                          "can't clear rx halt, status %d\n",
+                                          status);
+               } else {
+                       clear_bit(EVENT_RX_HALT, &dev->flags);
+                       tasklet_schedule(&dev->bh);
+               }
+       }
+
+       if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
+               int ret = 0;
+
+               clear_bit(EVENT_LINK_RESET, &dev->flags);
+               status = usb_autopm_get_interface(dev->intf);
+               if (status < 0)
+                       goto skip_reset;
+               ret = lan78xx_link_reset(dev);
+               if (ret < 0) {
+                       usb_autopm_put_interface(dev->intf);
+skip_reset:
+                       netdev_info(dev->net, "link reset failed (%d)\n",
+                                   ret);
+               } else {
+                       usb_autopm_put_interface(dev->intf);
+               }
+       }
+}
+
+static void intr_complete(struct urb *urb)
+{
+       struct lan78xx_net *dev = urb->context;
+       int status = urb->status;
+
+       switch (status) {
+       /* success */
+       case 0:
+               lan78xx_status(dev, urb);
+               break;
+
+       /* software-driven interface shutdown */
+       case -ENOENT:                   /* urb killed */
+       case -ESHUTDOWN:                /* hardware gone */
+               netif_dbg(dev, ifdown, dev->net,
+                         "intr shutdown, code %d\n", status);
+               return;
+
+       /* NOTE:  not throttling like RX/TX, since this endpoint
+        * already polls infrequently
+        */
+       default:
+               netdev_dbg(dev->net, "intr status %d\n", status);
+               break;
+       }
+
+       if (!netif_running(dev->net))
+               return;
+
+       memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
+       status = usb_submit_urb(urb, GFP_ATOMIC);
+       if (status != 0)
+               netif_err(dev, timer, dev->net,
+                         "intr resubmit --> %d\n", status);
+}
+
+static void lan78xx_disconnect(struct usb_interface *intf)
+{
+       struct lan78xx_net              *dev;
+       struct usb_device               *udev;
+       struct net_device               *net;
+
+       dev = usb_get_intfdata(intf);
+       usb_set_intfdata(intf, NULL);
+       if (!dev)
+               return;
+
+       udev = interface_to_usbdev(intf);
+
+       net = dev->net;
+       unregister_netdev(net);
+
+       cancel_delayed_work_sync(&dev->wq);
+
+       usb_scuttle_anchored_urbs(&dev->deferred);
+
+       lan78xx_unbind(dev, intf);
+
+       usb_kill_urb(dev->urb_intr);
+       usb_free_urb(dev->urb_intr);
+
+       free_netdev(net);
+       usb_put_dev(udev);
+}
+
+void lan78xx_tx_timeout(struct net_device *net)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+
+       unlink_urbs(dev, &dev->txq);
+       tasklet_schedule(&dev->bh);
+}
+
+static const struct net_device_ops lan78xx_netdev_ops = {
+       .ndo_open               = lan78xx_open,
+       .ndo_stop               = lan78xx_stop,
+       .ndo_start_xmit         = lan78xx_start_xmit,
+       .ndo_tx_timeout         = lan78xx_tx_timeout,
+       .ndo_change_mtu         = lan78xx_change_mtu,
+       .ndo_set_mac_address    = lan78xx_set_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_do_ioctl           = lan78xx_ioctl,
+       .ndo_set_rx_mode        = lan78xx_set_multicast,
+       .ndo_set_features       = lan78xx_set_features,
+       .ndo_vlan_rx_add_vid    = lan78xx_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = lan78xx_vlan_rx_kill_vid,
+};
+
+static int lan78xx_probe(struct usb_interface *intf,
+                        const struct usb_device_id *id)
+{
+       struct lan78xx_net *dev;
+       struct net_device *netdev;
+       struct usb_device *udev;
+       int ret;
+       unsigned maxp;
+       unsigned period;
+       u8 *buf = NULL;
+
+       udev = interface_to_usbdev(intf);
+       udev = usb_get_dev(udev);
+
+       ret = -ENOMEM;
+       netdev = alloc_etherdev(sizeof(struct lan78xx_net));
+       if (!netdev) {
+               dev_err(&intf->dev, "Error: OOM\n");
+               goto out1;
+       }
+
+       /* netdev_printk() needs this */
+       SET_NETDEV_DEV(netdev, &intf->dev);
+
+       dev = netdev_priv(netdev);
+       dev->udev = udev;
+       dev->intf = intf;
+       dev->net = netdev;
+       dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
+                                       | NETIF_MSG_PROBE | NETIF_MSG_LINK);
+
+       skb_queue_head_init(&dev->rxq);
+       skb_queue_head_init(&dev->txq);
+       skb_queue_head_init(&dev->done);
+       skb_queue_head_init(&dev->rxq_pause);
+       skb_queue_head_init(&dev->txq_pend);
+       mutex_init(&dev->phy_mutex);
+
+       tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
+       INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
+       init_usb_anchor(&dev->deferred);
+
+       netdev->netdev_ops = &lan78xx_netdev_ops;
+       netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
+       netdev->ethtool_ops = &lan78xx_ethtool_ops;
+
+       ret = lan78xx_bind(dev, intf);
+       if (ret < 0)
+               goto out2;
+       strcpy(netdev->name, "eth%d");
+
+       if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
+               netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
+
+       dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
+       dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
+       dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
+
+       dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
+       dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
+
+       dev->pipe_intr = usb_rcvintpipe(dev->udev,
+                                       dev->ep_intr->desc.bEndpointAddress &
+                                       USB_ENDPOINT_NUMBER_MASK);
+       period = dev->ep_intr->desc.bInterval;
+
+       maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
+       buf = kmalloc(maxp, GFP_KERNEL);
+       if (buf) {
+               dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
+               if (!dev->urb_intr) {
+                       kfree(buf);
+                       goto out3;
+               } else {
+                       usb_fill_int_urb(dev->urb_intr, dev->udev,
+                                        dev->pipe_intr, buf, maxp,
+                                        intr_complete, dev, period);
+               }
+       }
+
+       dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
+
+       /* driver requires remote-wakeup capability during autosuspend. */
+       intf->needs_remote_wakeup = 1;
+
+       ret = register_netdev(netdev);
+       if (ret != 0) {
+               netif_err(dev, probe, netdev, "couldn't register the device\n");
+               goto out3;
+       }
+
+       usb_set_intfdata(intf, dev);
+
+       ret = device_set_wakeup_enable(&udev->dev, true);
+
+       /* the default autosuspend delay of 2 seconds costs more than it
+        * saves; use 10 seconds instead.
+        */
+       pm_runtime_set_autosuspend_delay(&udev->dev,
+                                        DEFAULT_AUTOSUSPEND_DELAY);
+
+       return 0;
+
+out3:
+       lan78xx_unbind(dev, intf);
+out2:
+       free_netdev(netdev);
+out1:
+       usb_put_dev(udev);
+
+       return ret;
+}
+
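+/* bit-serial CRC-16 over the wakeup-frame pattern (polynomial 0x8005,
+ * initial value 0xFFFF, data consumed LSB first), as programmed into
+ * WUF_CFGX_CRC16
+ */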
+static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
+{
+       const u16 crc16poly = 0x8005;
+       int i;
+       u16 bit, crc, msb;
+       u8 data;
+
+       crc = 0xFFFF;
+       for (i = 0; i < len; i++) {
+               data = *buf++;
+               for (bit = 0; bit < 8; bit++) {
+                       msb = crc >> 15;
+                       crc <<= 1;
+
+                       if (msb ^ (u16)(data & 1)) {
+                               crc ^= crc16poly;
+                               crc |= (u16)0x0001U;
+                       }
+                       data >>= 1;
+               }
+       }
+
+       return crc;
+}
+
+static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
+{
+       u32 buf;
+       int ret;
+       int mask_index;
+       u16 crc;
+       u32 temp_wucsr;
+       u32 temp_pmt_ctl;
+       const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
+       const u8 ipv6_multicast[3] = { 0x33, 0x33 };
+       const u8 arp_type[2] = { 0x08, 0x06 };
+
+       ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+       buf &= ~MAC_TX_TXEN_;
+       ret = lan78xx_write_reg(dev, MAC_TX, buf);
+       ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+       buf &= ~MAC_RX_RXEN_;
+       ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+       ret = lan78xx_write_reg(dev, WUCSR, 0);
+       ret = lan78xx_write_reg(dev, WUCSR2, 0);
+       ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+
+       temp_wucsr = 0;
+
+       temp_pmt_ctl = 0;
+       ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
+       temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
+       temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
+
+       for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
+               ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
+
+       mask_index = 0;
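+       /* for each requested wake source, set the matching WUCSR enable
+        * bit and pick a suspend state: magic-packet wake needs
+        * PMT_CTL_SUS_MODE_3_, everything else uses PMT_CTL_SUS_MODE_0_
+        */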
+       if (wol & WAKE_PHY) {
+               temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
+
+               temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+               temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+               temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+       }
+       if (wol & WAKE_MAGIC) {
+               temp_wucsr |= WUCSR_MPEN_;
+
+               temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+               temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+               temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
+       }
+       if (wol & WAKE_BCAST) {
+               temp_wucsr |= WUCSR_BCST_EN_;
+
+               temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+               temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+               temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+       }
+       if (wol & WAKE_MCAST) {
+               temp_wucsr |= WUCSR_WAKE_EN_;
+
+               /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
+               crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
+               ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+                                       WUF_CFGX_EN_ |
+                                       WUF_CFGX_TYPE_MCAST_ |
+                                       (0 << WUF_CFGX_OFFSET_SHIFT_) |
+                                       (crc & WUF_CFGX_CRC16_MASK_));
+
+               ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
+               ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+               ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+               ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+               mask_index++;
+
+               /* for IPv6 Multicast */
+               crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
+               ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+                                       WUF_CFGX_EN_ |
+                                       WUF_CFGX_TYPE_MCAST_ |
+                                       (0 << WUF_CFGX_OFFSET_SHIFT_) |
+                                       (crc & WUF_CFGX_CRC16_MASK_));
+
+               ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
+               ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+               ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+               ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+               mask_index++;
+
+               temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+               temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+               temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+       }
+       if (wol & WAKE_UCAST) {
+               temp_wucsr |= WUCSR_PFDA_EN_;
+
+               temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+               temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+               temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+       }
+       if (wol & WAKE_ARP) {
+               temp_wucsr |= WUCSR_WAKE_EN_;
+
+               /* set WUF_CFG & WUF_MASK to match the EtherType field
+                * (frame offset 12,13) against ARP (0x0806)
+                */
+               crc = lan78xx_wakeframe_crc16(arp_type, 2);
+               ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+                                       WUF_CFGX_EN_ |
+                                       WUF_CFGX_TYPE_ALL_ |
+                                       (0 << WUF_CFGX_OFFSET_SHIFT_) |
+                                       (crc & WUF_CFGX_CRC16_MASK_));
+
+               ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
+               ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+               ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+               ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+               mask_index++;
+
+               temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+               temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+               temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+       }
+
+       ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
+
+       /* when multiple WOL bits are set, fall back to suspend mode 0 */
+       if (hweight_long((unsigned long)wol) > 1) {
+               temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+               temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+               temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+       }
+       ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
+
+       /* clear WUPS */
+       ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+       buf |= PMT_CTL_WUPS_MASK_;
+       ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+       ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+       buf |= MAC_RX_RXEN_;
+       ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+       return 0;
+}
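+
+/* Reading of the mode selection above: each enabled wake source sets
+ * PMT_CTL_WOL_EN_ and picks a suspend state -- SUS_MODE_3_ for magic
+ * packet, SUS_MODE_0_ for all other sources and whenever more than one
+ * WOL bit is set -- so the deeper SUS_MODE_3_ is only used when magic
+ * packet is the sole wake source.
+ */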
+
+int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
+{
+       struct lan78xx_net *dev = usb_get_intfdata(intf);
+       struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+       u32 buf;
+       int ret;
+       int event;
+
+       ret = 0;
+       event = message.event;
+
+       if (!dev->suspend_count++) {
+               spin_lock_irq(&dev->txq.lock);
+               /* don't autosuspend while transmitting */
+               if ((skb_queue_len(&dev->txq) ||
+                    skb_queue_len(&dev->txq_pend)) &&
+                   PMSG_IS_AUTO(message)) {
+                       spin_unlock_irq(&dev->txq.lock);
+                       ret = -EBUSY;
+                       goto out;
+               } else {
+                       set_bit(EVENT_DEV_ASLEEP, &dev->flags);
+                       spin_unlock_irq(&dev->txq.lock);
+               }
+
+               /* stop TX & RX */
+               ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+               buf &= ~MAC_TX_TXEN_;
+               ret = lan78xx_write_reg(dev, MAC_TX, buf);
+               ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+               buf &= ~MAC_RX_RXEN_;
+               ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+               /* drain the rx and tx queues */
+               netif_device_detach(dev->net);
+               lan78xx_terminate_urbs(dev);
+               usb_kill_urb(dev->urb_intr);
+
+               /* reattach */
+               netif_device_attach(dev->net);
+       }
+
+       if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+               if (PMSG_IS_AUTO(message)) {
+                       /* auto suspend (selective suspend) */
+                       ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+                       buf &= ~MAC_TX_TXEN_;
+                       ret = lan78xx_write_reg(dev, MAC_TX, buf);
+                       ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+                       buf &= ~MAC_RX_RXEN_;
+                       ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+                       ret = lan78xx_write_reg(dev, WUCSR, 0);
+                       ret = lan78xx_write_reg(dev, WUCSR2, 0);
+                       ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+
+                       /* enable wakeup on received good frames */
+                       ret = lan78xx_read_reg(dev, WUCSR, &buf);
+
+                       buf |= WUCSR_RFE_WAKE_EN_;
+                       buf |= WUCSR_STORE_WAKE_;
+
+                       ret = lan78xx_write_reg(dev, WUCSR, buf);
+
+                       ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+
+                       buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
+                       buf |= PMT_CTL_RES_CLR_WKP_STS_;
+
+                       buf |= PMT_CTL_PHY_WAKE_EN_;
+                       buf |= PMT_CTL_WOL_EN_;
+                       buf &= ~PMT_CTL_SUS_MODE_MASK_;
+                       buf |= PMT_CTL_SUS_MODE_3_;
+
+                       ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+                       ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+
+                       buf |= PMT_CTL_WUPS_MASK_;
+
+                       ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+                       ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+                       buf |= MAC_RX_RXEN_;
+                       ret = lan78xx_write_reg(dev, MAC_RX, buf);
+               } else {
+                       lan78xx_set_suspend(dev, pdata->wol);
+               }
+       }
+
+out:
+       return ret;
+}
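+
+/* Note on the counting above: suspend_count nests, so TX/RX shutdown and
+ * URB teardown only happen on the first suspend of the interface, while
+ * the wakeup programming (selective-suspend registers vs. full WOL via
+ * lan78xx_set_suspend) is reapplied whenever EVENT_DEV_ASLEEP is set.
+ */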
+
+int lan78xx_resume(struct usb_interface *intf)
+{
+       struct lan78xx_net *dev = usb_get_intfdata(intf);
+       struct sk_buff *skb;
+       struct urb *res;
+       int ret;
+       u32 buf;
+
+       if (!--dev->suspend_count) {
+               /* resubmit the interrupt URB if the device is open */
+               if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
+                       usb_submit_urb(dev->urb_intr, GFP_NOIO);
+
+               spin_lock_irq(&dev->txq.lock);
+               while ((res = usb_get_from_anchor(&dev->deferred))) {
+                       skb = (struct sk_buff *)res->context;
+                       ret = usb_submit_urb(res, GFP_ATOMIC);
+                       if (ret < 0) {
+                               dev_kfree_skb_any(skb);
+                               usb_free_urb(res);
+                               usb_autopm_put_interface_async(dev->intf);
+                       } else {
+                               dev->net->trans_start = jiffies;
+                               lan78xx_queue_skb(&dev->txq, skb, tx_start);
+                       }
+               }
+
+               clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
+               spin_unlock_irq(&dev->txq.lock);
+
+               if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
+                       if (skb_queue_len(&dev->txq) < dev->tx_qlen)
+                               netif_start_queue(dev->net);
+                       tasklet_schedule(&dev->bh);
+               }
+       }
+
+       ret = lan78xx_write_reg(dev, WUCSR2, 0);
+       ret = lan78xx_write_reg(dev, WUCSR, 0);
+       ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+
+       ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
+                                            WUCSR2_ARP_RCD_ |
+                                            WUCSR2_IPV6_TCPSYN_RCD_ |
+                                            WUCSR2_IPV4_TCPSYN_RCD_);
+
+       ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
+                                           WUCSR_EEE_RX_WAKE_ |
+                                           WUCSR_PFDA_FR_ |
+                                           WUCSR_RFE_WAKE_FR_ |
+                                           WUCSR_WUFR_ |
+                                           WUCSR_MPR_ |
+                                           WUCSR_BCST_FR_);
+
+       ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+       buf |= MAC_TX_TXEN_;
+       ret = lan78xx_write_reg(dev, MAC_TX, buf);
+
+       return 0;
+}
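+
+/* The resume path above zeroes the wakeup control registers and then
+ * writes the *_RCD_, *_FR_ and MPR status bits back -- assuming the
+ * usual write-one-to-clear semantics for these bits, this discards any
+ * wake events latched while suspended before TX is re-enabled.
+ */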
+
+int lan78xx_reset_resume(struct usb_interface *intf)
+{
+       struct lan78xx_net *dev = usb_get_intfdata(intf);
+
+       lan78xx_reset(dev);
+       return lan78xx_resume(intf);
+}
+
+static const struct usb_device_id products[] = {
+       {
+       /* LAN7800 USB Gigabit Ethernet Device */
+       USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
+       },
+       {
+       /* LAN7850 USB Gigabit Ethernet Device */
+       USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver lan78xx_driver = {
+       .name                   = DRIVER_NAME,
+       .id_table               = products,
+       .probe                  = lan78xx_probe,
+       .disconnect             = lan78xx_disconnect,
+       .suspend                = lan78xx_suspend,
+       .resume                 = lan78xx_resume,
+       .reset_resume           = lan78xx_reset_resume,
+       .supports_autosuspend   = 1,
+       .disable_hub_initiated_lpm = 1,
+};
+
+module_usb_driver(lan78xx_driver);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/lan78xx.h b/drivers/net/usb/lan78xx.h
new file mode 100644
index 0000000..ae7562e
--- /dev/null
+++ b/drivers/net/usb/lan78xx.h
@@ -0,0 +1,1069 @@
+/*
+ * Copyright (C) 2015 Microchip Technology
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _LAN78XX_H
+#define _LAN78XX_H
+
+/* USB Vendor Requests */
+#define USB_VENDOR_REQUEST_WRITE_REGISTER      0xA0
+#define USB_VENDOR_REQUEST_READ_REGISTER       0xA1
+#define USB_VENDOR_REQUEST_GET_STATS           0xA2
+
+/* Interrupt Endpoint status word bitfields */
+#define INT_ENP_EEE_START_TX_LPI_INT           BIT(26)
+#define INT_ENP_EEE_STOP_TX_LPI_INT            BIT(25)
+#define INT_ENP_EEE_RX_LPI_INT                 BIT(24)
+#define INT_ENP_RDFO_INT                       BIT(22)
+#define INT_ENP_TXE_INT                                BIT(21)
+#define INT_ENP_TX_DIS_INT                     BIT(19)
+#define INT_ENP_RX_DIS_INT                     BIT(18)
+#define INT_ENP_PHY_INT                                BIT(17)
+#define INT_ENP_DP_INT                         BIT(16)
+#define INT_ENP_MAC_ERR_INT                    BIT(15)
+#define INT_ENP_TDFU_INT                       BIT(14)
+#define INT_ENP_TDFO_INT                       BIT(13)
+#define INT_ENP_UTX_FP_INT                     BIT(12)
+
+#define TX_PKT_ALIGNMENT                       4
+#define RX_PKT_ALIGNMENT                       4
+
+/* Tx Command A */
+#define TX_CMD_A_IGE_                  (0x20000000)
+#define TX_CMD_A_ICE_                  (0x10000000)
+#define TX_CMD_A_LSO_                  (0x08000000)
+#define TX_CMD_A_IPE_                  (0x04000000)
+#define TX_CMD_A_TPE_                  (0x02000000)
+#define TX_CMD_A_IVTG_                 (0x01000000)
+#define TX_CMD_A_RVTG_                 (0x00800000)
+#define TX_CMD_A_FCS_                  (0x00400000)
+#define TX_CMD_A_LEN_MASK_             (0x000FFFFF)
+
+/* Tx Command B */
+#define TX_CMD_B_MSS_SHIFT_            (16)
+#define TX_CMD_B_MSS_MASK_             (0x3FFF0000)
+#define TX_CMD_B_MSS_MIN_              ((unsigned short)8)
+#define TX_CMD_B_VTAG_MASK_            (0x0000FFFF)
+#define TX_CMD_B_VTAG_PRI_MASK_                (0x0000E000)
+#define TX_CMD_B_VTAG_CFI_MASK_                (0x00001000)
+#define TX_CMD_B_VTAG_VID_MASK_                (0x00000FFF)
+
+/* Rx Command A */
+#define RX_CMD_A_ICE_                  (0x80000000)
+#define RX_CMD_A_TCE_                  (0x40000000)
+#define RX_CMD_A_CSE_MASK_             (0xC0000000)
+#define RX_CMD_A_IPV_                  (0x20000000)
+#define RX_CMD_A_PID_MASK_             (0x18000000)
+#define RX_CMD_A_PID_NONE_IP_          (0x00000000)
+#define RX_CMD_A_PID_TCP_IP_           (0x08000000)
+#define RX_CMD_A_PID_UDP_IP_           (0x10000000)
+#define RX_CMD_A_PID_IP_               (0x18000000)
+#define RX_CMD_A_PFF_                  (0x04000000)
+#define RX_CMD_A_BAM_                  (0x02000000)
+#define RX_CMD_A_MAM_                  (0x01000000)
+#define RX_CMD_A_FVTG_                 (0x00800000)
+#define RX_CMD_A_RED_                  (0x00400000)
+#define RX_CMD_A_RX_ERRS_MASK_         (0xC03F0000)
+#define RX_CMD_A_RWT_                  (0x00200000)
+#define RX_CMD_A_RUNT_                 (0x00100000)
+#define RX_CMD_A_LONG_                 (0x00080000)
+#define RX_CMD_A_RXE_                  (0x00040000)
+#define RX_CMD_A_DRB_                  (0x00020000)
+#define RX_CMD_A_FCS_                  (0x00010000)
+#define RX_CMD_A_UAM_                  (0x00008000)
+#define RX_CMD_A_ICSM_                 (0x00004000)
+#define RX_CMD_A_LEN_MASK_             (0x00003FFF)
+
+/* Rx Command B */
+#define RX_CMD_B_CSUM_SHIFT_           (16)
+#define RX_CMD_B_CSUM_MASK_            (0xFFFF0000)
+#define RX_CMD_B_VTAG_MASK_            (0x0000FFFF)
+#define RX_CMD_B_VTAG_PRI_MASK_                (0x0000E000)
+#define RX_CMD_B_VTAG_CFI_MASK_                (0x00001000)
+#define RX_CMD_B_VTAG_VID_MASK_                (0x00000FFF)
+
+/* Rx Command C */
+#define RX_CMD_C_WAKE_SHIFT_           (15)
+#define RX_CMD_C_WAKE_                 (0x8000)
+#define RX_CMD_C_REF_FAIL_SHIFT_       (14)
+#define RX_CMD_C_REF_FAIL_             (0x4000)
+
+/* SCSRs */
+#define NUMBER_OF_REGS                 (193)
+
+#define ID_REV                         (0x00)
+#define ID_REV_CHIP_ID_MASK_           (0xFFFF0000)
+#define ID_REV_CHIP_REV_MASK_          (0x0000FFFF)
+#define ID_REV_CHIP_ID_7800_           (0x7800)
+
+#define FPGA_REV                       (0x04)
+#define FPGA_REV_MINOR_MASK_           (0x0000FF00)
+#define FPGA_REV_MAJOR_MASK_           (0x000000FF)
+
+#define INT_STS                                (0x0C)
+#define INT_STS_CLEAR_ALL_             (0xFFFFFFFF)
+#define INT_STS_EEE_TX_LPI_STRT_       (0x04000000)
+#define INT_STS_EEE_TX_LPI_STOP_       (0x02000000)
+#define INT_STS_EEE_RX_LPI_            (0x01000000)
+#define INT_STS_RDFO_                  (0x00400000)
+#define INT_STS_TXE_                   (0x00200000)
+#define INT_STS_TX_DIS_                        (0x00080000)
+#define INT_STS_RX_DIS_                        (0x00040000)
+#define INT_STS_PHY_INT_               (0x00020000)
+#define INT_STS_DP_INT_                        (0x00010000)
+#define INT_STS_MAC_ERR_               (0x00008000)
+#define INT_STS_TDFU_                  (0x00004000)
+#define INT_STS_TDFO_                  (0x00002000)
+#define INT_STS_UFX_FP_                        (0x00001000)
+#define INT_STS_GPIO_MASK_             (0x00000FFF)
+#define INT_STS_GPIO11_                        (0x00000800)
+#define INT_STS_GPIO10_                        (0x00000400)
+#define INT_STS_GPIO9_                 (0x00000200)
+#define INT_STS_GPIO8_                 (0x00000100)
+#define INT_STS_GPIO7_                 (0x00000080)
+#define INT_STS_GPIO6_                 (0x00000040)
+#define INT_STS_GPIO5_                 (0x00000020)
+#define INT_STS_GPIO4_                 (0x00000010)
+#define INT_STS_GPIO3_                 (0x00000008)
+#define INT_STS_GPIO2_                 (0x00000004)
+#define INT_STS_GPIO1_                 (0x00000002)
+#define INT_STS_GPIO0_                 (0x00000001)
+
+#define HW_CFG                         (0x010)
+#define HW_CFG_CLK125_EN_              (0x02000000)
+#define HW_CFG_REFCLK25_EN_            (0x01000000)
+#define HW_CFG_LED3_EN_                        (0x00800000)
+#define HW_CFG_LED2_EN_                        (0x00400000)
+#define HW_CFG_LED1_EN_                        (0x00200000)
+#define HW_CFG_LED0_EN_                        (0x00100000)
+#define HW_CFG_EEE_PHY_LUSU_           (0x00020000)
+#define HW_CFG_EEE_TSU_                        (0x00010000)
+#define HW_CFG_NETDET_STS_             (0x00008000)
+#define HW_CFG_NETDET_EN_              (0x00004000)
+#define HW_CFG_EEM_                    (0x00002000)
+#define HW_CFG_RST_PROTECT_            (0x00001000)
+#define HW_CFG_CONNECT_BUF_            (0x00000400)
+#define HW_CFG_CONNECT_EN_             (0x00000200)
+#define HW_CFG_CONNECT_POL_            (0x00000100)
+#define HW_CFG_SUSPEND_N_SEL_MASK_     (0x000000C0)
+#define HW_CFG_SUSPEND_N_SEL_2         (0x00000000)
+#define HW_CFG_SUSPEND_N_SEL_12N       (0x00000040)
+#define HW_CFG_SUSPEND_N_SEL_012N      (0x00000080)
+#define HW_CFG_SUSPEND_N_SEL_0123N     (0x000000C0)
+#define HW_CFG_SUSPEND_N_POL_          (0x00000020)
+#define HW_CFG_MEF_                    (0x00000010)
+#define HW_CFG_ETC_                    (0x00000008)
+#define HW_CFG_LRST_                   (0x00000002)
+#define HW_CFG_SRST_                   (0x00000001)
+
+#define PMT_CTL                                (0x014)
+#define PMT_CTL_EEE_WAKEUP_EN_         (0x00002000)
+#define PMT_CTL_EEE_WUPS_              (0x00001000)
+#define PMT_CTL_MAC_SRST_              (0x00000800)
+#define PMT_CTL_PHY_PWRUP_             (0x00000400)
+#define PMT_CTL_RES_CLR_WKP_MASK_      (0x00000300)
+#define PMT_CTL_RES_CLR_WKP_STS_       (0x00000200)
+#define PMT_CTL_RES_CLR_WKP_EN_                (0x00000100)
+#define PMT_CTL_READY_                 (0x00000080)
+#define PMT_CTL_SUS_MODE_MASK_         (0x00000060)
+#define PMT_CTL_SUS_MODE_0_            (0x00000000)
+#define PMT_CTL_SUS_MODE_1_            (0x00000020)
+#define PMT_CTL_SUS_MODE_2_            (0x00000040)
+#define PMT_CTL_SUS_MODE_3_            (0x00000060)
+#define PMT_CTL_PHY_RST_               (0x00000010)
+#define PMT_CTL_WOL_EN_                        (0x00000008)
+#define PMT_CTL_PHY_WAKE_EN_           (0x00000004)
+#define PMT_CTL_WUPS_MASK_             (0x00000003)
+#define PMT_CTL_WUPS_MLT_              (0x00000003)
+#define PMT_CTL_WUPS_MAC_              (0x00000002)
+#define PMT_CTL_WUPS_PHY_              (0x00000001)
+
+#define GPIO_CFG0                      (0x018)
+#define GPIO_CFG0_GPIOEN_MASK_         (0x0000F000)
+#define GPIO_CFG0_GPIOEN3_             (0x00008000)
+#define GPIO_CFG0_GPIOEN2_             (0x00004000)
+#define GPIO_CFG0_GPIOEN1_             (0x00002000)
+#define GPIO_CFG0_GPIOEN0_             (0x00001000)
+#define GPIO_CFG0_GPIOBUF_MASK_                (0x00000F00)
+#define GPIO_CFG0_GPIOBUF3_            (0x00000800)
+#define GPIO_CFG0_GPIOBUF2_            (0x00000400)
+#define GPIO_CFG0_GPIOBUF1_            (0x00000200)
+#define GPIO_CFG0_GPIOBUF0_            (0x00000100)
+#define GPIO_CFG0_GPIODIR_MASK_                (0x000000F0)
+#define GPIO_CFG0_GPIODIR3_            (0x00000080)
+#define GPIO_CFG0_GPIODIR2_            (0x00000040)
+#define GPIO_CFG0_GPIODIR1_            (0x00000020)
+#define GPIO_CFG0_GPIODIR0_            (0x00000010)
+#define GPIO_CFG0_GPIOD_MASK_          (0x0000000F)
+#define GPIO_CFG0_GPIOD3_              (0x00000008)
+#define GPIO_CFG0_GPIOD2_              (0x00000004)
+#define GPIO_CFG0_GPIOD1_              (0x00000002)
+#define GPIO_CFG0_GPIOD0_              (0x00000001)
+
+#define GPIO_CFG1                      (0x01C)
+#define GPIO_CFG1_GPIOEN_MASK_         (0xFF000000)
+#define GPIO_CFG1_GPIOEN11_            (0x80000000)
+#define GPIO_CFG1_GPIOEN10_            (0x40000000)
+#define GPIO_CFG1_GPIOEN9_             (0x20000000)
+#define GPIO_CFG1_GPIOEN8_             (0x10000000)
+#define GPIO_CFG1_GPIOEN7_             (0x08000000)
+#define GPIO_CFG1_GPIOEN6_             (0x04000000)
+#define GPIO_CFG1_GPIOEN5_             (0x02000000)
+#define GPIO_CFG1_GPIOEN4_             (0x01000000)
+#define GPIO_CFG1_GPIOBUF_MASK_                (0x00FF0000)
+#define GPIO_CFG1_GPIOBUF11_           (0x00800000)
+#define GPIO_CFG1_GPIOBUF10_           (0x00400000)
+#define GPIO_CFG1_GPIOBUF9_            (0x00200000)
+#define GPIO_CFG1_GPIOBUF8_            (0x00100000)
+#define GPIO_CFG1_GPIOBUF7_            (0x00080000)
+#define GPIO_CFG1_GPIOBUF6_            (0x00040000)
+#define GPIO_CFG1_GPIOBUF5_            (0x00020000)
+#define GPIO_CFG1_GPIOBUF4_            (0x00010000)
+#define GPIO_CFG1_GPIODIR_MASK_                (0x0000FF00)
+#define GPIO_CFG1_GPIODIR11_           (0x00008000)
+#define GPIO_CFG1_GPIODIR10_           (0x00004000)
+#define GPIO_CFG1_GPIODIR9_            (0x00002000)
+#define GPIO_CFG1_GPIODIR8_            (0x00001000)
+#define GPIO_CFG1_GPIODIR7_            (0x00000800)
+#define GPIO_CFG1_GPIODIR6_            (0x00000400)
+#define GPIO_CFG1_GPIODIR5_            (0x00000200)
+#define GPIO_CFG1_GPIODIR4_            (0x00000100)
+#define GPIO_CFG1_GPIOD_MASK_          (0x000000FF)
+#define GPIO_CFG1_GPIOD11_             (0x00000080)
+#define GPIO_CFG1_GPIOD10_             (0x00000040)
+#define GPIO_CFG1_GPIOD9_              (0x00000020)
+#define GPIO_CFG1_GPIOD8_              (0x00000010)
+#define GPIO_CFG1_GPIOD7_              (0x00000008)
+#define GPIO_CFG1_GPIOD6_              (0x00000004)
+#define GPIO_CFG1_GPIOD5_              (0x00000002)
+#define GPIO_CFG1_GPIOD4_              (0x00000001)
+
+#define GPIO_WAKE                      (0x020)
+#define GPIO_WAKE_GPIOPOL_MASK_                (0x0FFF0000)
+#define GPIO_WAKE_GPIOPOL11_           (0x08000000)
+#define GPIO_WAKE_GPIOPOL10_           (0x04000000)
+#define GPIO_WAKE_GPIOPOL9_            (0x02000000)
+#define GPIO_WAKE_GPIOPOL8_            (0x01000000)
+#define GPIO_WAKE_GPIOPOL7_            (0x00800000)
+#define GPIO_WAKE_GPIOPOL6_            (0x00400000)
+#define GPIO_WAKE_GPIOPOL5_            (0x00200000)
+#define GPIO_WAKE_GPIOPOL4_            (0x00100000)
+#define GPIO_WAKE_GPIOPOL3_            (0x00080000)
+#define GPIO_WAKE_GPIOPOL2_            (0x00040000)
+#define GPIO_WAKE_GPIOPOL1_            (0x00020000)
+#define GPIO_WAKE_GPIOPOL0_            (0x00010000)
+#define GPIO_WAKE_GPIOWK_MASK_         (0x00000FFF)
+#define GPIO_WAKE_GPIOWK11_            (0x00000800)
+#define GPIO_WAKE_GPIOWK10_            (0x00000400)
+#define GPIO_WAKE_GPIOWK9_             (0x00000200)
+#define GPIO_WAKE_GPIOWK8_             (0x00000100)
+#define GPIO_WAKE_GPIOWK7_             (0x00000080)
+#define GPIO_WAKE_GPIOWK6_             (0x00000040)
+#define GPIO_WAKE_GPIOWK5_             (0x00000020)
+#define GPIO_WAKE_GPIOWK4_             (0x00000010)
+#define GPIO_WAKE_GPIOWK3_             (0x00000008)
+#define GPIO_WAKE_GPIOWK2_             (0x00000004)
+#define GPIO_WAKE_GPIOWK1_             (0x00000002)
+#define GPIO_WAKE_GPIOWK0_             (0x00000001)
+
+#define DP_SEL                         (0x024)
+#define DP_SEL_DPRDY_                  (0x80000000)
+#define DP_SEL_RSEL_MASK_              (0x0000000F)
+#define DP_SEL_RSEL_USB_PHY_CSRS_      (0x0000000F)
+#define DP_SEL_RSEL_OTP_64BIT_         (0x00000009)
+#define DP_SEL_RSEL_OTP_8BIT_          (0x00000008)
+#define DP_SEL_RSEL_UTX_BUF_RAM_       (0x00000007)
+#define DP_SEL_RSEL_DESC_RAM_          (0x00000005)
+#define DP_SEL_RSEL_TXFIFO_            (0x00000004)
+#define DP_SEL_RSEL_RXFIFO_            (0x00000003)
+#define DP_SEL_RSEL_LSO_               (0x00000002)
+#define DP_SEL_RSEL_VLAN_DA_           (0x00000001)
+#define DP_SEL_RSEL_URXBUF_            (0x00000000)
+#define DP_SEL_VHF_HASH_LEN            (16)
+#define DP_SEL_VHF_VLAN_LEN            (128)
+
+#define DP_CMD                         (0x028)
+#define DP_CMD_WRITE_                  (0x00000001)
+#define DP_CMD_READ_                   (0x00000000)
+
+#define DP_ADDR                                (0x02C)
+#define DP_ADDR_MASK_                  (0x00003FFF)
+
+#define DP_DATA                                (0x030)
+
+#define E2P_CMD                                (0x040)
+#define E2P_CMD_EPC_BUSY_              (0x80000000)
+#define E2P_CMD_EPC_CMD_MASK_          (0x70000000)
+#define E2P_CMD_EPC_CMD_RELOAD_                (0x70000000)
+#define E2P_CMD_EPC_CMD_ERAL_          (0x60000000)
+#define E2P_CMD_EPC_CMD_ERASE_         (0x50000000)
+#define E2P_CMD_EPC_CMD_WRAL_          (0x40000000)
+#define E2P_CMD_EPC_CMD_WRITE_         (0x30000000)
+#define E2P_CMD_EPC_CMD_EWEN_          (0x20000000)
+#define E2P_CMD_EPC_CMD_EWDS_          (0x10000000)
+#define E2P_CMD_EPC_CMD_READ_          (0x00000000)
+#define E2P_CMD_EPC_TIMEOUT_           (0x00000400)
+#define E2P_CMD_EPC_DL_                        (0x00000200)
+#define E2P_CMD_EPC_ADDR_MASK_         (0x000001FF)
+
+#define E2P_DATA                       (0x044)
+#define E2P_DATA_EEPROM_DATA_MASK_     (0x000000FF)
+
+#define BOS_ATTR                       (0x050)
+#define BOS_ATTR_BLOCK_SIZE_MASK_      (0x000000FF)
+
+#define SS_ATTR                                (0x054)
+#define SS_ATTR_POLL_INT_MASK_         (0x00FF0000)
+#define SS_ATTR_DEV_DESC_SIZE_MASK_    (0x0000FF00)
+#define SS_ATTR_CFG_BLK_SIZE_MASK_     (0x000000FF)
+
+#define HS_ATTR                                (0x058)
+#define HS_ATTR_POLL_INT_MASK_         (0x00FF0000)
+#define HS_ATTR_DEV_DESC_SIZE_MASK_    (0x0000FF00)
+#define HS_ATTR_CFG_BLK_SIZE_MASK_     (0x000000FF)
+
+#define FS_ATTR                                (0x05C)
+#define FS_ATTR_POLL_INT_MASK_         (0x00FF0000)
+#define FS_ATTR_DEV_DESC_SIZE_MASK_    (0x0000FF00)
+#define FS_ATTR_CFG_BLK_SIZE_MASK_     (0x000000FF)
+
+#define STR_ATTR0                          (0x060)
+#define STR_ATTR0_CFGSTR_DESC_SIZE_MASK_    (0xFF000000)
+#define STR_ATTR0_SERSTR_DESC_SIZE_MASK_    (0x00FF0000)
+#define STR_ATTR0_PRODSTR_DESC_SIZE_MASK_   (0x0000FF00)
+#define STR_ATTR0_MANUF_DESC_SIZE_MASK_     (0x000000FF)
+
+#define STR_ATTR1                          (0x064)
+#define STR_ATTR1_INTSTR_DESC_SIZE_MASK_    (0x000000FF)
+
+#define STR_FLAG_ATTR                      (0x068)
+#define STR_FLAG_ATTR_PME_FLAGS_MASK_      (0x000000FF)
+
+#define USB_CFG0                       (0x080)
+#define USB_CFG_LPM_RESPONSE_          (0x80000000)
+#define USB_CFG_LPM_CAPABILITY_                (0x40000000)
+#define USB_CFG_LPM_ENBL_SLPM_         (0x20000000)
+#define USB_CFG_HIRD_THR_MASK_         (0x1F000000)
+#define USB_CFG_HIRD_THR_960_          (0x1C000000)
+#define USB_CFG_HIRD_THR_885_          (0x1B000000)
+#define USB_CFG_HIRD_THR_810_          (0x1A000000)
+#define USB_CFG_HIRD_THR_735_          (0x19000000)
+#define USB_CFG_HIRD_THR_660_          (0x18000000)
+#define USB_CFG_HIRD_THR_585_          (0x17000000)
+#define USB_CFG_HIRD_THR_510_          (0x16000000)
+#define USB_CFG_HIRD_THR_435_          (0x15000000)
+#define USB_CFG_HIRD_THR_360_          (0x14000000)
+#define USB_CFG_HIRD_THR_285_          (0x13000000)
+#define USB_CFG_HIRD_THR_210_          (0x12000000)
+#define USB_CFG_HIRD_THR_135_          (0x11000000)
+#define USB_CFG_HIRD_THR_60_           (0x10000000)
+#define USB_CFG_MAX_BURST_BI_MASK_     (0x00F00000)
+#define USB_CFG_MAX_BURST_BO_MASK_     (0x000F0000)
+#define USB_CFG_MAX_DEV_SPEED_MASK_    (0x0000E000)
+#define USB_CFG_MAX_DEV_SPEED_SS_      (0x00008000)
+#define USB_CFG_MAX_DEV_SPEED_HS_      (0x00000000)
+#define USB_CFG_MAX_DEV_SPEED_FS_      (0x00002000)
+#define USB_CFG_PHY_BOOST_MASK_                (0x00000180)
+#define USB_CFG_PHY_BOOST_PLUS_12_     (0x00000180)
+#define USB_CFG_PHY_BOOST_PLUS_8_      (0x00000100)
+#define USB_CFG_PHY_BOOST_PLUS_4_      (0x00000080)
+#define USB_CFG_PHY_BOOST_NORMAL_      (0x00000000)
+#define USB_CFG_BIR_                   (0x00000040)
+#define USB_CFG_BCE_                   (0x00000020)
+#define USB_CFG_PORT_SWAP_             (0x00000010)
+#define USB_CFG_LPM_EN_                        (0x00000008)
+#define USB_CFG_RMT_WKP_               (0x00000004)
+#define USB_CFG_PWR_SEL_               (0x00000002)
+#define USB_CFG_STALL_BO_DIS_          (0x00000001)
+
+#define USB_CFG1                       (0x084)
+#define USB_CFG1_U1_TIMEOUT_MASK_      (0xFF000000)
+#define USB_CFG1_U2_TIMEOUT_MASK_      (0x00FF0000)
+#define USB_CFG1_HS_TOUT_CAL_MASK_     (0x0000E000)
+#define USB_CFG1_DEV_U2_INIT_EN_       (0x00001000)
+#define USB_CFG1_DEV_U2_EN_            (0x00000800)
+#define USB_CFG1_DEV_U1_INIT_EN_       (0x00000400)
+#define USB_CFG1_DEV_U1_EN_            (0x00000200)
+#define USB_CFG1_LTM_ENABLE_           (0x00000100)
+#define USB_CFG1_FS_TOUT_CAL_MASK_     (0x00000070)
+#define USB_CFG1_SCALE_DOWN_MASK_      (0x00000003)
+#define USB_CFG1_SCALE_DOWN_MODE3_     (0x00000003)
+#define USB_CFG1_SCALE_DOWN_MODE2_     (0x00000002)
+#define USB_CFG1_SCALE_DOWN_MODE1_     (0x00000001)
+#define USB_CFG1_SCALE_DOWN_MODE0_     (0x00000000)
+
+#define USB_CFG2                           (0x088)
+#define USB_CFG2_SS_DETACH_TIME_MASK_      (0xFFFF0000)
+#define USB_CFG2_HS_DETACH_TIME_MASK_      (0x0000FFFF)
+
+#define BURST_CAP                      (0x090)
+#define BURST_CAP_SIZE_MASK_           (0x000000FF)
+
+#define BULK_IN_DLY                    (0x094)
+#define BULK_IN_DLY_MASK_              (0x0000FFFF)
+
+#define INT_EP_CTL                     (0x098)
+#define INT_EP_INTEP_ON_               (0x80000000)
+#define INT_STS_EEE_TX_LPI_STRT_EN_    (0x04000000)
+#define INT_STS_EEE_TX_LPI_STOP_EN_    (0x02000000)
+#define INT_STS_EEE_RX_LPI_EN_         (0x01000000)
+#define INT_EP_RDFO_EN_                        (0x00400000)
+#define INT_EP_TXE_EN_                 (0x00200000)
+#define INT_EP_TX_DIS_EN_              (0x00080000)
+#define INT_EP_RX_DIS_EN_              (0x00040000)
+#define INT_EP_PHY_INT_EN_             (0x00020000)
+#define INT_EP_DP_INT_EN_              (0x00010000)
+#define INT_EP_MAC_ERR_EN_             (0x00008000)
+#define INT_EP_TDFU_EN_                        (0x00004000)
+#define INT_EP_TDFO_EN_                        (0x00002000)
+#define INT_EP_UTX_FP_EN_              (0x00001000)
+#define INT_EP_GPIO_EN_MASK_           (0x00000FFF)
+
+#define PIPE_CTL                       (0x09C)
+#define PIPE_CTL_TXSWING_              (0x00000040)
+#define PIPE_CTL_TXMARGIN_MASK_                (0x00000038)
+#define PIPE_CTL_TXDEEMPHASIS_MASK_    (0x00000006)
+#define PIPE_CTL_ELASTICITYBUFFERMODE_ (0x00000001)
+
+#define U1_LATENCY                     (0xA0)
+#define U2_LATENCY                     (0xA4)
+
+#define USB_STATUS                     (0x0A8)
+#define USB_STATUS_REMOTE_WK_          (0x00100000)
+#define USB_STATUS_FUNC_REMOTE_WK_     (0x00080000)
+#define USB_STATUS_LTM_ENABLE_         (0x00040000)
+#define USB_STATUS_U2_ENABLE_          (0x00020000)
+#define USB_STATUS_U1_ENABLE_          (0x00010000)
+#define USB_STATUS_SET_SEL_            (0x00000020)
+#define USB_STATUS_REMOTE_WK_STS_      (0x00000010)
+#define USB_STATUS_FUNC_REMOTE_WK_STS_ (0x00000008)
+#define USB_STATUS_LTM_ENABLE_STS_     (0x00000004)
+#define USB_STATUS_U2_ENABLE_STS_      (0x00000002)
+#define USB_STATUS_U1_ENABLE_STS_      (0x00000001)
+
+#define USB_CFG3                       (0x0AC)
+#define USB_CFG3_EN_U2_LTM_            (0x40000000)
+#define USB_CFG3_BULK_OUT_NUMP_OVR_    (0x20000000)
+#define USB_CFG3_DIS_FAST_U1_EXIT_     (0x10000000)
+#define USB_CFG3_LPM_NYET_THR_         (0x0F000000)
+#define USB_CFG3_RX_DET_2_POL_LFPS_    (0x00800000)
+#define USB_CFG3_LFPS_FILT_            (0x00400000)
+#define USB_CFG3_SKIP_RX_DET_          (0x00200000)
+#define USB_CFG3_DELAY_P1P2P3_         (0x001C0000)
+#define USB_CFG3_DELAY_PHY_PWR_CHG_    (0x00020000)
+#define USB_CFG3_U1U2_EXIT_FR_         (0x00010000)
+#define USB_CFG3_REQ_P1P2P3            (0x00008000)
+#define USB_CFG3_HST_PRT_CMPL_         (0x00004000)
+#define USB_CFG3_DIS_SCRAMB_           (0x00002000)
+#define USB_CFG3_PWR_DN_SCALE_         (0x00001FFF)
+
+#define RFE_CTL                                (0x0B0)
+#define RFE_CTL_IGMP_COE_              (0x00004000)
+#define RFE_CTL_ICMP_COE_              (0x00002000)
+#define RFE_CTL_TCPUDP_COE_            (0x00001000)
+#define RFE_CTL_IP_COE_                        (0x00000800)
+#define RFE_CTL_BCAST_EN_              (0x00000400)
+#define RFE_CTL_MCAST_EN_              (0x00000200)
+#define RFE_CTL_UCAST_EN_              (0x00000100)
+#define RFE_CTL_VLAN_STRIP_            (0x00000080)
+#define RFE_CTL_DISCARD_UNTAGGED_      (0x00000040)
+#define RFE_CTL_VLAN_FILTER_           (0x00000020)
+#define RFE_CTL_SA_FILTER_             (0x00000010)
+#define RFE_CTL_MCAST_HASH_            (0x00000008)
+#define RFE_CTL_DA_HASH_               (0x00000004)
+#define RFE_CTL_DA_PERFECT_            (0x00000002)
+#define RFE_CTL_RST_                   (0x00000001)
+
+#define VLAN_TYPE                      (0x0B4)
+#define VLAN_TYPE_MASK_                        (0x0000FFFF)
+
+#define FCT_RX_CTL                     (0x0C0)
+#define FCT_RX_CTL_EN_                 (0x80000000)
+#define FCT_RX_CTL_RST_                        (0x40000000)
+#define FCT_RX_CTL_SBF_                        (0x02000000)
+#define FCT_RX_CTL_OVFL_               (0x01000000)
+#define FCT_RX_CTL_DROP_               (0x00800000)
+#define FCT_RX_CTL_NOT_EMPTY_          (0x00400000)
+#define FCT_RX_CTL_EMPTY_              (0x00200000)
+#define FCT_RX_CTL_DIS_                        (0x00100000)
+#define FCT_RX_CTL_USED_MASK_          (0x0000FFFF)
+
+#define FCT_TX_CTL                     (0x0C4)
+#define FCT_TX_CTL_EN_                 (0x80000000)
+#define FCT_TX_CTL_RST_                        (0x40000000)
+#define FCT_TX_CTL_NOT_EMPTY_          (0x00400000)
+#define FCT_TX_CTL_EMPTY_              (0x00200000)
+#define FCT_TX_CTL_DIS_                        (0x00100000)
+#define FCT_TX_CTL_USED_MASK_          (0x0000FFFF)
+
+#define FCT_RX_FIFO_END                        (0x0C8)
+#define FCT_RX_FIFO_END_MASK_          (0x0000007F)
+
+#define FCT_TX_FIFO_END                        (0x0CC)
+#define FCT_TX_FIFO_END_MASK_          (0x0000003F)
+
+#define FCT_FLOW                       (0x0D0)
+#define FCT_FLOW_OFF_MASK_             (0x00007F00)
+#define FCT_FLOW_ON_MASK_              (0x0000007F)
+
+#define RX_DP_STOR                     (0x0D4)
+#define RX_DP_STORE_TOT_RXUSED_MASK_   (0xFFFF0000)
+#define RX_DP_STORE_UTX_RXUSED_MASK_   (0x0000FFFF)
+
+#define TX_DP_STOR                     (0x0D8)
+#define TX_DP_STORE_TOT_TXUSED_MASK_   (0xFFFF0000)
+#define TX_DP_STORE_URX_TXUSED_MASK_   (0x0000FFFF)
+
+#define LTM_BELT_IDLE0                 (0x0E0)
+#define LTM_BELT_IDLE0_IDLE1000_       (0x0FFF0000)
+#define LTM_BELT_IDLE0_IDLE100_                (0x00000FFF)
+
+#define LTM_BELT_IDLE1                 (0x0E4)
+#define LTM_BELT_IDLE1_IDLE10_         (0x00000FFF)
+
+#define LTM_BELT_ACT0                  (0x0E8)
+#define LTM_BELT_ACT0_ACT1000_         (0x0FFF0000)
+#define LTM_BELT_ACT0_ACT100_          (0x00000FFF)
+
+#define LTM_BELT_ACT1                  (0x0EC)
+#define LTM_BELT_ACT1_ACT10_           (0x00000FFF)
+
+#define LTM_INACTIVE0                  (0x0F0)
+#define LTM_INACTIVE0_TIMER1000_       (0xFFFF0000)
+#define LTM_INACTIVE0_TIMER100_                (0x0000FFFF)
+
+#define LTM_INACTIVE1                  (0x0F4)
+#define LTM_INACTIVE1_TIMER10_         (0x0000FFFF)
+
+#define MAC_CR                         (0x100)
+#define MAC_CR_GMII_EN_                        (0x00080000)
+#define MAC_CR_EEE_TX_CLK_STOP_EN_     (0x00040000)
+#define MAC_CR_EEE_EN_                 (0x00020000)
+#define MAC_CR_EEE_TLAR_EN_            (0x00010000)
+#define MAC_CR_ADP_                    (0x00002000)
+#define MAC_CR_AUTO_DUPLEX_            (0x00001000)
+#define MAC_CR_AUTO_SPEED_             (0x00000800)
+#define MAC_CR_LOOPBACK_               (0x00000400)
+#define MAC_CR_BOLMT_MASK_             (0x000000C0)
+#define MAC_CR_FULL_DUPLEX_            (0x00000008)
+#define MAC_CR_SPEED_MASK_             (0x00000006)
+#define MAC_CR_SPEED_1000_             (0x00000004)
+#define MAC_CR_SPEED_100_              (0x00000002)
+#define MAC_CR_SPEED_10_               (0x00000000)
+#define MAC_CR_RST_                    (0x00000001)
+
+#define MAC_RX                         (0x104)
+#define MAC_RX_MAX_SIZE_SHIFT_         (16)
+#define MAC_RX_MAX_SIZE_MASK_          (0x3FFF0000)
+#define MAC_RX_FCS_STRIP_              (0x00000010)
+#define MAC_RX_VLAN_FSE_               (0x00000004)
+#define MAC_RX_RXD_                    (0x00000002)
+#define MAC_RX_RXEN_                   (0x00000001)
+
+#define MAC_TX                         (0x108)
+#define MAC_TX_BAD_FCS_                        (0x00000004)
+#define MAC_TX_TXD_                    (0x00000002)
+#define MAC_TX_TXEN_                   (0x00000001)
+
+#define FLOW                           (0x10C)
+#define FLOW_CR_FORCE_FC_              (0x80000000)
+#define FLOW_CR_TX_FCEN_               (0x40000000)
+#define FLOW_CR_RX_FCEN_               (0x20000000)
+#define FLOW_CR_FPF_                   (0x10000000)
+#define FLOW_CR_FCPT_MASK_             (0x0000FFFF)
+
+#define RAND_SEED                      (0x110)
+#define RAND_SEED_MASK_                        (0x0000FFFF)
+
+#define ERR_STS                                (0x114)
+#define ERR_STS_FERR_                  (0x00000100)
+#define ERR_STS_LERR_                  (0x00000080)
+#define ERR_STS_RFERR_                 (0x00000040)
+#define ERR_STS_ECERR_                 (0x00000010)
+#define ERR_STS_ALERR_                 (0x00000008)
+#define ERR_STS_URERR_                 (0x00000004)
+
+#define RX_ADDRH                       (0x118)
+#define RX_ADDRH_MASK_                 (0x0000FFFF)
+
+#define RX_ADDRL                       (0x11C)
+#define RX_ADDRL_MASK_                 (0xFFFFFFFF)
+
+#define MII_ACC                                (0x120)
+#define MII_ACC_PHY_ADDR_SHIFT_                (11)
+#define MII_ACC_PHY_ADDR_MASK_         (0x0000F800)
+#define MII_ACC_MIIRINDA_SHIFT_                (6)
+#define MII_ACC_MIIRINDA_MASK_         (0x000007C0)
+#define MII_ACC_MII_READ_              (0x00000000)
+#define MII_ACC_MII_WRITE_             (0x00000002)
+#define MII_ACC_MII_BUSY_              (0x00000001)
+
+#define MII_DATA                       (0x124)
+#define MII_DATA_MASK_                 (0x0000FFFF)
+
+#define MAC_RGMII_ID                   (0x128)
+#define MAC_RGMII_ID_TXC_DELAY_EN_     (0x00000002)
+#define MAC_RGMII_ID_RXC_DELAY_EN_     (0x00000001)
+
+#define EEE_TX_LPI_REQ_DLY             (0x130)
+#define EEE_TX_LPI_REQ_DLY_CNT_MASK_   (0xFFFFFFFF)
+
+#define EEE_TW_TX_SYS                  (0x134)
+#define EEE_TW_TX_SYS_CNT1G_MASK_      (0xFFFF0000)
+#define EEE_TW_TX_SYS_CNT100M_MASK_    (0x0000FFFF)
+
+#define EEE_TX_LPI_REM_DLY             (0x138)
+#define EEE_TX_LPI_REM_DLY_CNT_                (0x00FFFFFF)
+
+#define WUCSR                          (0x140)
+#define WUCSR_TESTMODE_                        (0x80000000)
+#define WUCSR_RFE_WAKE_EN_             (0x00004000)
+#define WUCSR_EEE_TX_WAKE_             (0x00002000)
+#define WUCSR_EEE_TX_WAKE_EN_          (0x00001000)
+#define WUCSR_EEE_RX_WAKE_             (0x00000800)
+#define WUCSR_EEE_RX_WAKE_EN_          (0x00000400)
+#define WUCSR_RFE_WAKE_FR_             (0x00000200)
+#define WUCSR_STORE_WAKE_              (0x00000100)
+#define WUCSR_PFDA_FR_                 (0x00000080)
+#define WUCSR_WUFR_                    (0x00000040)
+#define WUCSR_MPR_                     (0x00000020)
+#define WUCSR_BCST_FR_                 (0x00000010)
+#define WUCSR_PFDA_EN_                 (0x00000008)
+#define WUCSR_WAKE_EN_                 (0x00000004)
+#define WUCSR_MPEN_                    (0x00000002)
+#define WUCSR_BCST_EN_                 (0x00000001)
+
+#define WK_SRC                         (0x144)
+#define WK_SRC_GPIOX_INT_WK_SHIFT_     (20)
+#define WK_SRC_GPIOX_INT_WK_MASK_      (0xFFF00000)
+#define WK_SRC_IPV6_TCPSYN_RCD_WK_     (0x00010000)
+#define WK_SRC_IPV4_TCPSYN_RCD_WK_     (0x00008000)
+#define WK_SRC_EEE_TX_WK_              (0x00004000)
+#define WK_SRC_EEE_RX_WK_              (0x00002000)
+#define WK_SRC_GOOD_FR_WK_             (0x00001000)
+#define WK_SRC_PFDA_FR_WK_             (0x00000800)
+#define WK_SRC_MP_FR_WK_               (0x00000400)
+#define WK_SRC_BCAST_FR_WK_            (0x00000200)
+#define WK_SRC_WU_FR_WK_               (0x00000100)
+#define WK_SRC_WUFF_MATCH_MASK_                (0x0000001F)
+
+#define WUF_CFG0                       (0x150)
+#define NUM_OF_WUF_CFG                 (32)
+#define WUF_CFG_BEGIN                  (WUF_CFG0)
+#define WUF_CFG(index)                 (WUF_CFG_BEGIN + (4 * (index)))
+#define WUF_CFGX_EN_                   (0x80000000)
+#define WUF_CFGX_TYPE_MASK_            (0x03000000)
+#define WUF_CFGX_TYPE_MCAST_           (0x02000000)
+#define WUF_CFGX_TYPE_ALL_             (0x01000000)
+#define WUF_CFGX_TYPE_UCAST_           (0x00000000)
+#define WUF_CFGX_OFFSET_SHIFT_         (16)
+#define WUF_CFGX_OFFSET_MASK_          (0x00FF0000)
+#define WUF_CFGX_CRC16_MASK_           (0x0000FFFF)
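+/* e.g. WUF_CFG(2) == 0x158: the 32 filter-config registers sit at
+ * consecutive 4-byte offsets, each holding enable/type/offset fields
+ * plus the pattern's CRC-16 in the low 16 bits.
+ */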
+
+#define WUF_MASK0_0                    (0x200)
+#define WUF_MASK0_1                    (0x204)
+#define WUF_MASK0_2                    (0x208)
+#define WUF_MASK0_3                    (0x20C)
+#define NUM_OF_WUF_MASK                        (32)
+#define WUF_MASK0_BEGIN                        (WUF_MASK0_0)
+#define WUF_MASK1_BEGIN                        (WUF_MASK0_1)
+#define WUF_MASK2_BEGIN                        (WUF_MASK0_2)
+#define WUF_MASK3_BEGIN                        (WUF_MASK0_3)
+#define WUF_MASK0(index)               (WUF_MASK0_BEGIN + (0x10 * (index)))
+#define WUF_MASK1(index)               (WUF_MASK1_BEGIN + (0x10 * (index)))
+#define WUF_MASK2(index)               (WUF_MASK2_BEGIN + (0x10 * (index)))
+#define WUF_MASK3(index)               (WUF_MASK3_BEGIN + (0x10 * (index)))
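+/* e.g. WUF_MASK0(1) == 0x210: each filter owns four 32-bit byte-mask
+ * words spaced 0x10 apart.
+ */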
+
+#define MAF_BASE                       (0x400)
+#define MAF_HIX                                (0x00)
+#define MAF_LOX                                (0x04)
+#define NUM_OF_MAF                     (33)
+#define MAF_HI_BEGIN                   (MAF_BASE + MAF_HIX)
+#define MAF_LO_BEGIN                   (MAF_BASE + MAF_LOX)
+#define MAF_HI(index)                  (MAF_BASE + (8 * (index)) + (MAF_HIX))
+#define MAF_LO(index)                  (MAF_BASE + (8 * (index)) + (MAF_LOX))
+#define MAF_HI_VALID_                  (0x80000000)
+#define MAF_HI_TYPE_MASK_              (0x40000000)
+#define MAF_HI_TYPE_SRC_               (0x40000000)
+#define MAF_HI_TYPE_DST_               (0x00000000)
+#define MAF_HI_ADDR_MASK               (0x0000FFFF)
+#define MAF_LO_ADDR_MASK               (0xFFFFFFFF)
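+/* e.g. MAF_HI(1) == 0x408, MAF_LO(1) == 0x40C: the 33 address-filter
+ * slots are 8 bytes apart, with valid/type flags plus two address bytes
+ * in MAF_HI and the remaining four bytes in MAF_LO.
+ */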
+
+#define WUCSR2                         (0x600)
+#define WUCSR2_CSUM_DISABLE_           (0x80000000)
+#define WUCSR2_NA_SA_SEL_              (0x00000100)
+#define WUCSR2_NS_RCD_                 (0x00000080)
+#define WUCSR2_ARP_RCD_                        (0x00000040)
+#define WUCSR2_IPV6_TCPSYN_RCD_                (0x00000020)
+#define WUCSR2_IPV4_TCPSYN_RCD_                (0x00000010)
+#define WUCSR2_NS_OFFLOAD_EN_          (0x00000008)
+#define WUCSR2_ARP_OFFLOAD_EN_         (0x00000004)
+#define WUCSR2_IPV6_TCPSYN_WAKE_EN_    (0x00000002)
+#define WUCSR2_IPV4_TCPSYN_WAKE_EN_    (0x00000001)
+
+#define NS1_IPV6_ADDR_DEST0            (0x610)
+#define NS1_IPV6_ADDR_DEST1            (0x614)
+#define NS1_IPV6_ADDR_DEST2            (0x618)
+#define NS1_IPV6_ADDR_DEST3            (0x61C)
+
+#define NS1_IPV6_ADDR_SRC0             (0x620)
+#define NS1_IPV6_ADDR_SRC1             (0x624)
+#define NS1_IPV6_ADDR_SRC2             (0x628)
+#define NS1_IPV6_ADDR_SRC3             (0x62C)
+
+#define NS1_ICMPV6_ADDR0_0             (0x630)
+#define NS1_ICMPV6_ADDR0_1             (0x634)
+#define NS1_ICMPV6_ADDR0_2             (0x638)
+#define NS1_ICMPV6_ADDR0_3             (0x63C)
+
+#define NS1_ICMPV6_ADDR1_0             (0x640)
+#define NS1_ICMPV6_ADDR1_1             (0x644)
+#define NS1_ICMPV6_ADDR1_2             (0x648)
+#define NS1_ICMPV6_ADDR1_3             (0x64C)
+
+#define NS2_IPV6_ADDR_DEST0            (0x650)
+#define NS2_IPV6_ADDR_DEST1            (0x654)
+#define NS2_IPV6_ADDR_DEST2            (0x658)
+#define NS2_IPV6_ADDR_DEST3            (0x65C)
+
+#define NS2_IPV6_ADDR_SRC0             (0x660)
+#define NS2_IPV6_ADDR_SRC1             (0x664)
+#define NS2_IPV6_ADDR_SRC2             (0x668)
+#define NS2_IPV6_ADDR_SRC3             (0x66C)
+
+#define NS2_ICMPV6_ADDR0_0             (0x670)
+#define NS2_ICMPV6_ADDR0_1             (0x674)
+#define NS2_ICMPV6_ADDR0_2             (0x678)
+#define NS2_ICMPV6_ADDR0_3             (0x67C)
+
+#define NS2_ICMPV6_ADDR1_0             (0x680)
+#define NS2_ICMPV6_ADDR1_1             (0x684)
+#define NS2_ICMPV6_ADDR1_2             (0x688)
+#define NS2_ICMPV6_ADDR1_3             (0x68C)
+
+#define SYN_IPV4_ADDR_SRC              (0x690)
+#define SYN_IPV4_ADDR_DEST             (0x694)
+#define SYN_IPV4_TCP_PORTS             (0x698)
+#define SYN_IPV4_TCP_PORTS_IPV4_DEST_PORT_SHIFT_    (16)
+#define SYN_IPV4_TCP_PORTS_IPV4_DEST_PORT_MASK_     (0xFFFF0000)
+#define SYN_IPV4_TCP_PORTS_IPV4_SRC_PORT_MASK_     (0x0000FFFF)
+
+#define SYN_IPV6_ADDR_SRC0             (0x69C)
+#define SYN_IPV6_ADDR_SRC1             (0x6A0)
+#define SYN_IPV6_ADDR_SRC2             (0x6A4)
+#define SYN_IPV6_ADDR_SRC3             (0x6A8)
+
+#define SYN_IPV6_ADDR_DEST0            (0x6AC)
+#define SYN_IPV6_ADDR_DEST1            (0x6B0)
+#define SYN_IPV6_ADDR_DEST2            (0x6B4)
+#define SYN_IPV6_ADDR_DEST3            (0x6B8)
+
+#define SYN_IPV6_TCP_PORTS             (0x6BC)
+#define SYN_IPV6_TCP_PORTS_IPV6_DEST_PORT_SHIFT_    (16)
+#define SYN_IPV6_TCP_PORTS_IPV6_DEST_PORT_MASK_     (0xFFFF0000)
+#define SYN_IPV6_TCP_PORTS_IPV6_SRC_PORT_MASK_     (0x0000FFFF)
+
+#define ARP_SPA                                (0x6C0)
+#define ARP_TPA                                (0x6C4)
+
+#define PHY_DEV_ID                     (0x700)
+#define PHY_DEV_ID_REV_SHIFT_          (28)
+#define PHY_DEV_ID_REV_MASK_           (0xF0000000)
+#define PHY_DEV_ID_MODEL_SHIFT_                (22)
+#define PHY_DEV_ID_MODEL_MASK_         (0x0FC00000)
+#define PHY_DEV_ID_OUI_MASK_           (0x003FFFFF)
+
+#define OTP_BASE_ADDR                  (0x00001000)
+#define OTP_ADDR_RANGE_                        (0x1FF)
+
+#define OTP_PWR_DN                     (OTP_BASE_ADDR + 4 * 0x00)
+#define OTP_PWR_DN_PWRDN_N_            (0x01)
+
+#define OTP_ADDR1                      (OTP_BASE_ADDR + 4 * 0x01)
+#define OTP_ADDR1_15_11                        (0x1F)
+
+#define OTP_ADDR2                      (OTP_BASE_ADDR + 4 * 0x02)
+#define OTP_ADDR2_10_3                 (0xFF)
+
+#define OTP_ADDR3                      (OTP_BASE_ADDR + 4 * 0x03)
+#define OTP_ADDR3_2_0                  (0x03)
+
+#define OTP_PRGM_DATA                  (OTP_BASE_ADDR + 4 * 0x04)
+
+#define OTP_PRGM_MODE                  (OTP_BASE_ADDR + 4 * 0x05)
+#define OTP_PRGM_MODE_BYTE_            (0x01)
+
+#define OTP_RD_DATA                    (OTP_BASE_ADDR + 4 * 0x06)
+
+#define OTP_FUNC_CMD                   (OTP_BASE_ADDR + 4 * 0x08)
+#define OTP_FUNC_CMD_RESET_            (0x04)
+#define OTP_FUNC_CMD_PROGRAM_          (0x02)
+#define OTP_FUNC_CMD_READ_             (0x01)
+
+#define OTP_TST_CMD                    (OTP_BASE_ADDR + 4 * 0x09)
+#define OTP_TST_CMD_TEST_DEC_SEL_      (0x10)
+#define OTP_TST_CMD_PRGVRFY_           (0x08)
+#define OTP_TST_CMD_WRTEST_            (0x04)
+#define OTP_TST_CMD_TESTDEC_           (0x02)
+#define OTP_TST_CMD_BLANKCHECK_                (0x01)
+
+#define OTP_CMD_GO                     (OTP_BASE_ADDR + 4 * 0x0A)
+#define OTP_CMD_GO_GO_                 (0x01)
+
+#define OTP_PASS_FAIL                  (OTP_BASE_ADDR + 4 * 0x0B)
+#define OTP_PASS_FAIL_PASS_            (0x02)
+#define OTP_PASS_FAIL_FAIL_            (0x01)
+
+#define OTP_STATUS                     (OTP_BASE_ADDR + 4 * 0x0C)
+#define OTP_STATUS_OTP_LOCK_           (0x10)
+#define OTP_STATUS_WEB_                        (0x08)
+#define OTP_STATUS_PGMEN               (0x04)
+#define OTP_STATUS_CPUMPEN_            (0x02)
+#define OTP_STATUS_BUSY_               (0x01)
+
+#define OTP_MAX_PRG                    (OTP_BASE_ADDR + 4 * 0x0D)
+#define OTP_MAX_PRG_MAX_PROG           (0x1F)
+
+#define OTP_INTR_STATUS                        (OTP_BASE_ADDR + 4 * 0x10)
+#define OTP_INTR_STATUS_READY_         (0x01)
+
+#define OTP_INTR_MASK                  (OTP_BASE_ADDR + 4 * 0x11)
+#define OTP_INTR_MASK_READY_           (0x01)
+
+#define OTP_RSTB_PW1                   (OTP_BASE_ADDR + 4 * 0x14)
+#define OTP_RSTB_PW2                   (OTP_BASE_ADDR + 4 * 0x15)
+#define OTP_PGM_PW1                    (OTP_BASE_ADDR + 4 * 0x18)
+#define OTP_PGM_PW2                    (OTP_BASE_ADDR + 4 * 0x19)
+#define OTP_READ_PW1                   (OTP_BASE_ADDR + 4 * 0x1C)
+#define OTP_READ_PW2                   (OTP_BASE_ADDR + 4 * 0x1D)
+#define OTP_TCRST                      (OTP_BASE_ADDR + 4 * 0x20)
+#define OTP_RSRD                       (OTP_BASE_ADDR + 4 * 0x21)
+#define OTP_TREADEN_VAL                        (OTP_BASE_ADDR + 4 * 0x22)
+#define OTP_TDLES_VAL                  (OTP_BASE_ADDR + 4 * 0x23)
+#define OTP_TWWL_VAL                   (OTP_BASE_ADDR + 4 * 0x24)
+#define OTP_TDLEH_VAL                  (OTP_BASE_ADDR + 4 * 0x25)
+#define OTP_TWPED_VAL                  (OTP_BASE_ADDR + 4 * 0x26)
+#define OTP_TPES_VAL                   (OTP_BASE_ADDR + 4 * 0x27)
+#define OTP_TCPS_VAL                   (OTP_BASE_ADDR + 4 * 0x28)
+#define OTP_TCPH_VAL                   (OTP_BASE_ADDR + 4 * 0x29)
+#define OTP_TPGMVFY_VAL                        (OTP_BASE_ADDR + 4 * 0x2A)
+#define OTP_TPEH_VAL                   (OTP_BASE_ADDR + 4 * 0x2B)
+#define OTP_TPGRST_VAL                 (OTP_BASE_ADDR + 4 * 0x2C)
+#define OTP_TCLES_VAL                  (OTP_BASE_ADDR + 4 * 0x2D)
+#define OTP_TCLEH_VAL                  (OTP_BASE_ADDR + 4 * 0x2E)
+#define OTP_TRDES_VAL                  (OTP_BASE_ADDR + 4 * 0x2F)
+#define OTP_TBCACC_VAL                 (OTP_BASE_ADDR + 4 * 0x30)
+#define OTP_TAAC_VAL                   (OTP_BASE_ADDR + 4 * 0x31)
+#define OTP_TACCT_VAL                  (OTP_BASE_ADDR + 4 * 0x32)
+#define OTP_TRDEP_VAL                  (OTP_BASE_ADDR + 4 * 0x38)
+#define OTP_TPGSV_VAL                  (OTP_BASE_ADDR + 4 * 0x39)
+#define OTP_TPVSR_VAL                  (OTP_BASE_ADDR + 4 * 0x3A)
+#define OTP_TPVHR_VAL                  (OTP_BASE_ADDR + 4 * 0x3B)
+#define OTP_TPVSA_VAL                  (OTP_BASE_ADDR + 4 * 0x3C)
+
+#define PHY_ID1                                (0x02)
+#define PHY_ID2                                (0x03)
+
+#define PHY_DEV_ID_OUI_VTSE            (0x04001C)
+#define PHY_DEV_ID_MODEL_VTSE_8502     (0x23)
+
+#define PHY_AUTONEG_ADV                        (0x04)
+#define NWAY_AR_NEXT_PAGE_             (0x8000)
+#define NWAY_AR_REMOTE_FAULT_          (0x2000)
+#define NWAY_AR_ASM_DIR_               (0x0800)
+#define NWAY_AR_PAUSE_                 (0x0400)
+#define NWAY_AR_100T4_CAPS_            (0x0200)
+#define NWAY_AR_100TX_FD_CAPS_         (0x0100)
+#define NWAY_AR_SELECTOR_FIELD_                (0x001F)
+#define NWAY_AR_100TX_HD_CAPS_         (0x0080)
+#define NWAY_AR_10T_FD_CAPS_           (0x0040)
+#define NWAY_AR_10T_HD_CAPS_           (0x0020)
+#define NWAY_AR_ALL_CAPS_              (NWAY_AR_10T_HD_CAPS_ | \
+                                        NWAY_AR_10T_FD_CAPS_ | \
+                                        NWAY_AR_100TX_HD_CAPS_ | \
+                                        NWAY_AR_100TX_FD_CAPS_)
+#define NWAY_AR_PAUSE_MASK             (NWAY_AR_PAUSE_ | NWAY_AR_ASM_DIR_)
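+/* e.g. NWAY_AR_ALL_CAPS_ expands to 0x01E0, advertising all four
+ * 10/100 half- and full-duplex abilities at once.
+ */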
+
+#define PHY_LP_ABILITY                 (0x05)
+#define NWAY_LPAR_NEXT_PAGE_           (0x8000)
+#define NWAY_LPAR_ACKNOWLEDGE_         (0x4000)
+#define NWAY_LPAR_REMOTE_FAULT_                (0x2000)
+#define NWAY_LPAR_ASM_DIR_             (0x0800)
+#define NWAY_LPAR_PAUSE_               (0x0400)
+#define NWAY_LPAR_100T4_CAPS_          (0x0200)
+#define NWAY_LPAR_100TX_FD_CAPS_       (0x0100)
+#define NWAY_LPAR_100TX_HD_CAPS_       (0x0080)
+#define NWAY_LPAR_10T_FD_CAPS_         (0x0040)
+#define NWAY_LPAR_10T_HD_CAPS_         (0x0020)
+#define NWAY_LPAR_SELECTOR_FIELD_      (0x001F)
+
+#define PHY_AUTONEG_EXP                        (0x06)
+#define NWAY_ER_PAR_DETECT_FAULT_      (0x0010)
+#define NWAY_ER_LP_NEXT_PAGE_CAPS_     (0x0008)
+#define NWAY_ER_NEXT_PAGE_CAPS_                (0x0004)
+#define NWAY_ER_PAGE_RXD_              (0x0002)
+#define NWAY_ER_LP_NWAY_CAPS_          (0x0001)
+
+#define PHY_NEXT_PAGE_TX               (0x07)
+#define NPTX_NEXT_PAGE_                        (0x8000)
+#define NPTX_MSG_PAGE_                 (0x2000)
+#define NPTX_ACKNOWLDGE2_              (0x1000)
+#define NPTX_TOGGLE_                   (0x0800)
+#define NPTX_MSG_CODE_FIELD_           (0x0001)
+
+#define PHY_LP_NEXT_PAGE               (0x08)
+#define LP_RNPR_NEXT_PAGE_             (0x8000)
+#define LP_RNPR_ACKNOWLDGE_            (0x4000)
+#define LP_RNPR_MSG_PAGE_              (0x2000)
+#define LP_RNPR_ACKNOWLDGE2_           (0x1000)
+#define LP_RNPR_TOGGLE_                        (0x0800)
+#define LP_RNPR_MSG_CODE_FIELD_                (0x0001)
+
+#define PHY_1000T_CTRL                 (0x09)
+#define CR_1000T_TEST_MODE_4_          (0x8000)
+#define CR_1000T_TEST_MODE_3_          (0x6000)
+#define CR_1000T_TEST_MODE_2_          (0x4000)
+#define CR_1000T_TEST_MODE_1_          (0x2000)
+#define CR_1000T_MS_ENABLE_            (0x1000)
+#define CR_1000T_MS_VALUE_             (0x0800)
+#define CR_1000T_REPEATER_DTE_         (0x0400)
+#define CR_1000T_FD_CAPS_              (0x0200)
+#define CR_1000T_HD_CAPS_              (0x0100)
+#define CR_1000T_ASYM_PAUSE_           (0x0080)
+#define CR_1000T_TEST_MODE_NORMAL_     (0x0000)
+
+#define PHY_1000T_STATUS               (0x0A)
+#define SR_1000T_MS_CONFIG_FAULT_      (0x8000)
+#define SR_1000T_MS_CONFIG_RES_                (0x4000)
+#define SR_1000T_LOCAL_RX_STATUS_      (0x2000)
+#define SR_1000T_REMOTE_RX_STATUS_     (0x1000)
+#define SR_1000T_LP_FD_CAPS_           (0x0800)
+#define SR_1000T_LP_HD_CAPS_           (0x0400)
+#define SR_1000T_ASYM_PAUSE_DIR_       (0x0100)
+#define SR_1000T_IDLE_ERROR_CNT_       (0x00FF)
+#define SR_1000T_REMOTE_RX_STATUS_SHIFT                12
+#define SR_1000T_LOCAL_RX_STATUS_SHIFT         13
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT  5
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_20          20
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_100         100
+
+#define PHY_EXT_STATUS                 (0x0F)
+#define IEEE_ESR_1000X_FD_CAPS_                (0x8000)
+#define IEEE_ESR_1000X_HD_CAPS_                (0x4000)
+#define IEEE_ESR_1000T_FD_CAPS_                (0x2000)
+#define IEEE_ESR_1000T_HD_CAPS_                (0x1000)
+#define PHY_TX_POLARITY_MASK_          (0x0100)
+#define PHY_TX_NORMAL_POLARITY_                (0x0000)
+#define AUTO_POLARITY_DISABLE_         (0x0010)
+
+#define PHY_MMD_CTL                    (0x0D)
+#define PHY_MMD_CTRL_OP_MASK_          (0xC000)
+#define PHY_MMD_CTRL_OP_REG_           (0x0000)
+#define PHY_MMD_CTRL_OP_DNI_           (0x4000)
+#define PHY_MMD_CTRL_OP_DPIRW_         (0x8000)
+#define PHY_MMD_CTRL_OP_DPIWO_         (0xC000)
+#define PHY_MMD_CTRL_DEV_ADDR_MASK_    (0x001F)
+
+#define PHY_MMD_REG_DATA               (0x0E)
+
+/* VTSE Vendor Specific registers */
+#define PHY_VTSE_BYPASS                                (0x12)
+#define PHY_VTSE_BYPASS_DISABLE_PAIR_SWAP_     (0x0020)
+
+#define PHY_VTSE_INT_MASK                      (0x19)
+#define PHY_VTSE_INT_MASK_MDINTPIN_EN_         (0x8000)
+#define PHY_VTSE_INT_MASK_SPEED_CHANGE_                (0x4000)
+#define PHY_VTSE_INT_MASK_LINK_CHANGE_         (0x2000)
+#define PHY_VTSE_INT_MASK_FDX_CHANGE_          (0x1000)
+#define PHY_VTSE_INT_MASK_AUTONEG_ERR_         (0x0800)
+#define PHY_VTSE_INT_MASK_AUTONEG_DONE_                (0x0400)
+#define PHY_VTSE_INT_MASK_POE_DETECT_          (0x0200)
+#define PHY_VTSE_INT_MASK_SYMBOL_ERR_          (0x0100)
+#define PHY_VTSE_INT_MASK_FAST_LINK_FAIL_      (0x0080)
+#define PHY_VTSE_INT_MASK_WOL_EVENT_           (0x0040)
+#define PHY_VTSE_INT_MASK_EXTENDED_INT_                (0x0020)
+#define PHY_VTSE_INT_MASK_RESERVED_            (0x0010)
+#define PHY_VTSE_INT_MASK_FALSE_CARRIER_       (0x0008)
+#define PHY_VTSE_INT_MASK_LINK_SPEED_DS_       (0x0004)
+#define PHY_VTSE_INT_MASK_MASTER_SLAVE_DONE_   (0x0002)
+#define PHY_VTSE_INT_MASK_RX__ER_              (0x0001)
+
+#define PHY_VTSE_INT_STS                       (0x1A)
+#define PHY_VTSE_INT_STS_INT_ACTIVE_           (0x8000)
+#define PHY_VTSE_INT_STS_SPEED_CHANGE_         (0x4000)
+#define PHY_VTSE_INT_STS_LINK_CHANGE_          (0x2000)
+#define PHY_VTSE_INT_STS_FDX_CHANGE_           (0x1000)
+#define PHY_VTSE_INT_STS_AUTONEG_ERR_          (0x0800)
+#define PHY_VTSE_INT_STS_AUTONEG_DONE_         (0x0400)
+#define PHY_VTSE_INT_STS_POE_DETECT_           (0x0200)
+#define PHY_VTSE_INT_STS_SYMBOL_ERR_           (0x0100)
+#define PHY_VTSE_INT_STS_FAST_LINK_FAIL_       (0x0080)
+#define PHY_VTSE_INT_STS_WOL_EVENT_            (0x0040)
+#define PHY_VTSE_INT_STS_EXTENDED_INT_         (0x0020)
+#define PHY_VTSE_INT_STS_RESERVED_             (0x0010)
+#define PHY_VTSE_INT_STS_FALSE_CARRIER_                (0x0008)
+#define PHY_VTSE_INT_STS_LINK_SPEED_DS_                (0x0004)
+#define PHY_VTSE_INT_STS_MASTER_SLAVE_DONE_    (0x0002)
+#define PHY_VTSE_INT_STS_RX_ER_                        (0x0001)
+
+/* VTSE PHY registers */
+#define PHY_EXT_GPIO_PAGE              (0x1F)
+#define PHY_EXT_GPIO_PAGE_SPACE_0      (0x0000)
+#define PHY_EXT_GPIO_PAGE_SPACE_1      (0x0001)
+#define PHY_EXT_GPIO_PAGE_SPACE_2      (0x0002)
+
+/* Extended Register Page 1 space */
+#define PHY_EXT_MODE_CTRL              (0x13)
+#define PHY_EXT_MODE_CTRL_MDIX_MASK_   (0x000C)
+#define PHY_EXT_MODE_CTRL_AUTO_MDIX_   (0x0000)
+#define PHY_EXT_MODE_CTRL_MDI_         (0x0008)
+#define PHY_EXT_MODE_CTRL_MDI_X_       (0x000C)
+
+#define PHY_ANA_10BASE_T_HD            0x01
+#define PHY_ANA_10BASE_T_FD            0x02
+#define PHY_ANA_100BASE_TX_HD          0x04
+#define PHY_ANA_100BASE_TX_FD          0x08
+#define PHY_ANA_1000BASE_T_FD          0x10
+#define PHY_ANA_ALL_SUPPORTED_MEDIA    (PHY_ANA_10BASE_T_HD |   \
+                                        PHY_ANA_10BASE_T_FD |   \
+                                        PHY_ANA_100BASE_TX_HD | \
+                                        PHY_ANA_100BASE_TX_FD | \
+                                        PHY_ANA_1000BASE_T_FD)
+/* PHY MMD registers */
+#define PHY_MMD_DEV_3                          3
+
+#define PHY_EEE_PCS_STATUS                     (0x1)
+#define PHY_EEE_PCS_STATUS_TX_LPI_RCVD_                ((WORD)0x0800)
+#define PHY_EEE_PCS_STATUS_RX_LPI_RCVD_                ((WORD)0x0400)
+#define PHY_EEE_PCS_STATUS_TX_LPI_IND_         ((WORD)0x0200)
+#define PHY_EEE_PCS_STATUS_RX_LPI_IND_         ((WORD)0x0100)
+#define PHY_EEE_PCS_STATUS_PCS_RCV_LNK_STS_    ((WORD)0x0004)
+
+#define PHY_EEE_CAPABILITIES                   (0x14)
+#define PHY_EEE_CAPABILITIES_1000BT_EEE_       ((WORD)0x0004)
+#define PHY_EEE_CAPABILITIES_100BT_EEE_                ((WORD)0x0002)
+
+#define PHY_MMD_DEV_7                          7
+
+#define PHY_EEE_ADVERTISEMENT                  (0x3C)
+#define PHY_EEE_ADVERTISEMENT_1000BT_EEE_      ((WORD)0x0004)
+#define PHY_EEE_ADVERTISEMENT_100BT_EEE_       ((WORD)0x0002)
+
+#define PHY_EEE_LP_ADVERTISEMENT               (0x3D)
+#define PHY_EEE_1000BT_EEE_CAPABLE_            ((WORD)0x0004)
+#define PHY_EEE_100BT_EEE_CAPABLE_             ((WORD)0x0002)
+#endif /* _LAN78XX_H */
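
The auto-negotiation registers above (PHY_LP_ABILITY and friends) follow the standard IEEE 802.3 clause 22/28 layout, so a driver decodes them by masking with the NWAY_* bits. A minimal sketch, assuming a lan78xx_mdio_read() accessor (the real driver's helper is not shown in this hunk); a full pause resolution would also consult the local advertisement via NWAY_AR_PAUSE_MASK:

    /* Sketch, not part of the patch: decode the link partner's
     * pause advertisement with the PHY_LP_ABILITY masks above.
     */
    static void lan78xx_decode_lpa_pause(struct net_device *net, int phy_id,
                                         bool *sym, bool *asym)
    {
            int lpa = lan78xx_mdio_read(net, phy_id, PHY_LP_ABILITY);

            *sym  = !!(lpa & NWAY_LPAR_PAUSE_);
            *asym = !!(lpa & NWAY_LPAR_ASM_DIR_);
    }
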
index f603f362504bce0c1cb2656e1d29232eb05db846..1f7a7cd97e50277e48487e18eaeafc9406b27f46 100644
@@ -757,6 +757,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1199, 0x901c, 8)},    /* Sierra Wireless EM7700 */
        {QMI_FIXED_INTF(0x1199, 0x901f, 8)},    /* Sierra Wireless EM7355 */
        {QMI_FIXED_INTF(0x1199, 0x9041, 8)},    /* Sierra Wireless MC7305/MC7355 */
+       {QMI_FIXED_INTF(0x1199, 0x9041, 10)},   /* Sierra Wireless MC7305/MC7355 */
        {QMI_FIXED_INTF(0x1199, 0x9051, 8)},    /* Netgear AirCard 340U */
        {QMI_FIXED_INTF(0x1199, 0x9053, 8)},    /* Sierra Wireless Modem */
        {QMI_FIXED_INTF(0x1199, 0x9054, 8)},    /* Sierra Wireless Modem */
@@ -784,6 +785,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x413c, 0x81a4, 8)},    /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a8, 8)},    /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a9, 8)},    /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+       {QMI_FIXED_INTF(0x413c, 0x81b1, 8)},    /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x03f0, 0x581d, 4)},    /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
 
        /* 4. Gobi 1000 devices */
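
Both new IDs use QMI_FIXED_INTF(), which matches one specific interface of a composite USB modem; the duplicate 0x1199:0x9041 entry binds the second QMI/network function (interface 10) that MC7305/MC7355 firmware exposes alongside interface 8. The matcher is roughly the following sketch (the driver_info payload is an assumption here, the real macro sits at the top of qmi_wwan.c):

    /* Sketch of the idea behind QMI_FIXED_INTF(): match vendor,
     * product and one interface number of a composite device.
     */
    #define QMI_FIXED_INTF_SKETCH(vend, prod, num) \
            USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \
            .driver_info = (unsigned long)&qmi_wwan_info
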
index 7f6419ebb5e1cd8c7fc5abbe10170a4ad6d92ea8..fe4ec324aebc0284f3a72849dcfc6cc9187196a8 100644
@@ -27,7 +27,7 @@
 #include <linux/usb/cdc.h>
 
 /* Version Information */
-#define DRIVER_VERSION "v1.08.0 (2015/01/13)"
+#define DRIVER_VERSION "v1.08.1 (2015/07/28)"
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
 #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
 #define MODULENAME "r8152"
 
 /* USB_USB_CTRL */
 #define RX_AGG_DISABLE         0x0010
+#define RX_ZERO_EN             0x0080
 
 /* USB_U2P3_CTRL */
 #define U2P3_ENABLE            0x0001
@@ -622,6 +623,7 @@ enum rtl_version {
        RTL_VER_03,
        RTL_VER_04,
        RTL_VER_05,
+       RTL_VER_06,
        RTL_VER_MAX
 };
 
@@ -1902,11 +1904,10 @@ static void rtl_drop_queued_tx(struct r8152 *tp)
 static void rtl8152_tx_timeout(struct net_device *netdev)
 {
        struct r8152 *tp = netdev_priv(netdev);
-       int i;
 
        netif_warn(tp, tx_err, netdev, "Tx timeout\n");
-       for (i = 0; i < RTL8152_MAX_TX; i++)
-               usb_unlink_urb(tp->tx_info[i].urb);
+
+       usb_queue_reset_device(tp->intf);
 }
 
 static void rtl8152_set_rx_mode(struct net_device *netdev)
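
Rather than unlinking the TX URBs one at a time, the timeout handler now asks the USB core for a full device reset. usb_queue_reset_device() only queues the reset, since one cannot be performed from this (atomic) context; the core later brackets the actual reset with the driver's new pre_reset/post_reset callbacks, added further down in this patch. The resulting recovery flow:

    rtl8152_tx_timeout()
        usb_queue_reset_device(intf)     /* queued, returns immediately */
    ...later, from the USB core's reset work...
    rtl8152_pre_reset()                  /* quiesce NAPI, URBs, MAC */
    port reset, configuration restored
    rtl8152_post_reset()                 /* re-enable MAC, queue, NAPI */
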
@@ -2075,7 +2076,6 @@ static int rtl_start_rx(struct r8152 *tp)
 {
        int i, ret = 0;
 
-       napi_disable(&tp->napi);
        INIT_LIST_HEAD(&tp->rx_done);
        for (i = 0; i < RTL8152_MAX_RX; i++) {
                INIT_LIST_HEAD(&tp->rx_info[i].list);
@@ -2083,7 +2083,6 @@ static int rtl_start_rx(struct r8152 *tp)
                if (ret)
                        break;
        }
-       napi_enable(&tp->napi);
 
        if (ret && ++i < RTL8152_MAX_RX) {
                struct list_head rx_queue;
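
rtl_start_rx() no longer toggles NAPI itself; quiescing moves out to the callers, which each need their own bracket anyway (see the set_carrier() and rtl8152_resume() hunks later in this patch). The caller-side pattern becomes:

    /* Caller-side pattern after this change (sketch): */
    napi_disable(&tp->napi);
    rtl_start_rx(tp);       /* (re)submit RX URBs with NAPI quiesced */
    napi_enable(&tp->napi);
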
@@ -2166,6 +2165,7 @@ static int rtl8153_enable(struct r8152 *tp)
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return -ENODEV;
 
+       usb_disable_lpm(tp->udev);
        set_tx_qlen(tp);
        rtl_set_eee_plus(tp);
        r8153_set_rx_early_timeout(tp);
@@ -2337,11 +2337,61 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
                device_set_wakeup_enable(&tp->udev->dev, false);
 }
 
+static void r8153_u1u2en(struct r8152 *tp, bool enable)
+{
+       u8 u1u2[8];
+
+       if (enable)
+               memset(u1u2, 0xff, sizeof(u1u2));
+       else
+               memset(u1u2, 0x00, sizeof(u1u2));
+
+       usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
+}
+
+static void r8153_u2p3en(struct r8152 *tp, bool enable)
+{
+       u32 ocp_data;
+
+       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
+       if (enable && tp->version != RTL_VER_03 && tp->version != RTL_VER_04)
+               ocp_data |= U2P3_ENABLE;
+       else
+               ocp_data &= ~U2P3_ENABLE;
+       ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
+}
+
+static void r8153_power_cut_en(struct r8152 *tp, bool enable)
+{
+       u32 ocp_data;
+
+       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
+       if (enable)
+               ocp_data |= PWR_EN | PHASE2_EN;
+       else
+               ocp_data &= ~(PWR_EN | PHASE2_EN);
+       ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
+
+       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+       ocp_data &= ~PCUT_STATUS;
+       ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
+}
+
+static bool rtl_can_wakeup(struct r8152 *tp)
+{
+       struct usb_device *udev = tp->udev;
+
+       return (udev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_WAKEUP);
+}
+
 static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
 {
        if (enable) {
                u32 ocp_data;
 
+               r8153_u1u2en(tp, false);
+               r8153_u2p3en(tp, false);
+
                __rtl_set_wol(tp, WAKE_ANY);
 
                ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
@@ -2353,6 +2403,8 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
                ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
        } else {
                __rtl_set_wol(tp, tp->saved_wolopts);
+               r8153_u2p3en(tp, true);
+               r8153_u1u2en(tp, true);
        }
 }
 
@@ -2560,7 +2612,10 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
        u32 ocp_data;
        u16 data;
 
-       ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+       if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 ||
+           tp->version == RTL_VER_05)
+               ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+
        data = r8152_mdio_read(tp, MII_BMCR);
        if (data & BMCR_PDOWN) {
                data &= ~BMCR_PDOWN;
@@ -2599,46 +2654,6 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
        set_bit(PHY_RESET, &tp->flags);
 }
 
-static void r8153_u1u2en(struct r8152 *tp, bool enable)
-{
-       u8 u1u2[8];
-
-       if (enable)
-               memset(u1u2, 0xff, sizeof(u1u2));
-       else
-               memset(u1u2, 0x00, sizeof(u1u2));
-
-       usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
-}
-
-static void r8153_u2p3en(struct r8152 *tp, bool enable)
-{
-       u32 ocp_data;
-
-       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
-       if (enable)
-               ocp_data |= U2P3_ENABLE;
-       else
-               ocp_data &= ~U2P3_ENABLE;
-       ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
-}
-
-static void r8153_power_cut_en(struct r8152 *tp, bool enable)
-{
-       u32 ocp_data;
-
-       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
-       if (enable)
-               ocp_data |= PWR_EN | PHASE2_EN;
-       else
-               ocp_data &= ~(PWR_EN | PHASE2_EN);
-       ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
-
-       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
-       ocp_data &= ~PCUT_STATUS;
-       ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
-}
-
 static void r8153_first_init(struct r8152 *tp)
 {
        u32 ocp_data;
@@ -2701,7 +2716,7 @@ static void r8153_first_init(struct r8152 *tp)
 
        /* rx aggregation */
        ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
-       ocp_data &= ~RX_AGG_DISABLE;
+       ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
        ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
 }
 
@@ -2781,6 +2796,7 @@ static void rtl8153_disable(struct r8152 *tp)
        r8153_disable_aldps(tp);
        rtl_disable(tp);
        r8153_enable_aldps(tp);
+       usb_enable_lpm(tp->udev);
 }
 
 static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
@@ -2901,9 +2917,13 @@ static void rtl8153_up(struct r8152 *tp)
        if (test_bit(RTL8152_UNPLUG, &tp->flags))
                return;
 
+       r8153_u1u2en(tp, false);
        r8153_disable_aldps(tp);
        r8153_first_init(tp);
        r8153_enable_aldps(tp);
+       r8153_u2p3en(tp, true);
+       r8153_u1u2en(tp, true);
+       usb_enable_lpm(tp->udev);
 }
 
 static void rtl8153_down(struct r8152 *tp)
@@ -2914,6 +2934,7 @@ static void rtl8153_down(struct r8152 *tp)
        }
 
        r8153_u1u2en(tp, false);
+       r8153_u2p3en(tp, false);
        r8153_power_cut_en(tp, false);
        r8153_disable_aldps(tp);
        r8153_enter_oob(tp);
@@ -2932,8 +2953,10 @@ static void set_carrier(struct r8152 *tp)
                if (!netif_carrier_ok(netdev)) {
                        tp->rtl_ops.enable(tp);
                        set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+                       napi_disable(&tp->napi);
                        netif_carrier_on(netdev);
                        rtl_start_rx(tp);
+                       napi_enable(&tp->napi);
                }
        } else {
                if (netif_carrier_ok(netdev)) {
@@ -3223,7 +3246,7 @@ static void r8152b_init(struct r8152 *tp)
 
        /* enable rx aggregation */
        ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
-       ocp_data &= ~RX_AGG_DISABLE;
+       ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
        ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
 }
 
@@ -3252,6 +3275,7 @@ static void r8153_init(struct r8152 *tp)
                msleep(20);
        }
 
+       usb_disable_lpm(tp->udev);
        r8153_u2p3en(tp, false);
 
        if (tp->version == RTL_VER_04) {
@@ -3268,6 +3292,13 @@ static void r8153_init(struct r8152 *tp)
                ocp_data &= ~ECM_ALDPS;
                ocp_write_byte(tp, MCU_TYPE_PLA, PLA_DMY_REG0, ocp_data);
 
+               ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1);
+               if (ocp_read_word(tp, MCU_TYPE_USB, USB_BURST_SIZE) == 0)
+                       ocp_data &= ~DYNAMIC_BURST;
+               else
+                       ocp_data |= DYNAMIC_BURST;
+               ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data);
+       } else if (tp->version == RTL_VER_06) {
                ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1);
                if (ocp_read_word(tp, MCU_TYPE_USB, USB_BURST_SIZE) == 0)
                        ocp_data &= ~DYNAMIC_BURST;
@@ -3319,6 +3350,59 @@ static void r8153_init(struct r8152 *tp)
        r8153_enable_aldps(tp);
        r8152b_enable_fc(tp);
        rtl_tally_reset(tp);
+       r8153_u2p3en(tp, true);
+}
+
+static int rtl8152_pre_reset(struct usb_interface *intf)
+{
+       struct r8152 *tp = usb_get_intfdata(intf);
+       struct net_device *netdev;
+
+       if (!tp)
+               return 0;
+
+       netdev = tp->netdev;
+       if (!netif_running(netdev))
+               return 0;
+
+       napi_disable(&tp->napi);
+       clear_bit(WORK_ENABLE, &tp->flags);
+       usb_kill_urb(tp->intr_urb);
+       cancel_delayed_work_sync(&tp->schedule);
+       if (netif_carrier_ok(netdev)) {
+               netif_stop_queue(netdev);
+               mutex_lock(&tp->control);
+               tp->rtl_ops.disable(tp);
+               mutex_unlock(&tp->control);
+       }
+
+       return 0;
+}
+
+static int rtl8152_post_reset(struct usb_interface *intf)
+{
+       struct r8152 *tp = usb_get_intfdata(intf);
+       struct net_device *netdev;
+
+       if (!tp)
+               return 0;
+
+       netdev = tp->netdev;
+       if (!netif_running(netdev))
+               return 0;
+
+       set_bit(WORK_ENABLE, &tp->flags);
+       if (netif_carrier_ok(netdev)) {
+               mutex_lock(&tp->control);
+               tp->rtl_ops.enable(tp);
+               rtl8152_set_rx_mode(netdev);
+               mutex_unlock(&tp->control);
+               netif_wake_queue(netdev);
+       }
+
+       napi_enable(&tp->napi);
+
+       return 0;
 }
 
 static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
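
The rtl8152_pre_reset()/rtl8152_post_reset() pair added just above implements the two halves of the device reset queued from rtl8152_tx_timeout(): pre_reset quiesces NAPI, kills the interrupt URB and the delayed work, then disables the MAC under tp->control if the link was up; post_reset redoes those steps in reverse, re-enabling the MAC and RX mode, waking the TX queue and finally NAPI. The callbacks are wired up via .pre_reset/.post_reset in the usb_driver hunk near the end of the r8152 changes.
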
@@ -3374,9 +3458,11 @@ static int rtl8152_resume(struct usb_interface *intf)
                if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
                        rtl_runtime_suspend_enable(tp, false);
                        clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+                       napi_disable(&tp->napi);
                        set_bit(WORK_ENABLE, &tp->flags);
                        if (netif_carrier_ok(tp->netdev))
                                rtl_start_rx(tp);
+                       napi_enable(&tp->napi);
                } else {
                        tp->rtl_ops.up(tp);
                        rtl8152_set_speed(tp, AUTONEG_ENABLE,
@@ -3403,12 +3489,15 @@ static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
        if (usb_autopm_get_interface(tp->intf) < 0)
                return;
 
-       mutex_lock(&tp->control);
-
-       wol->supported = WAKE_ANY;
-       wol->wolopts = __rtl_get_wol(tp);
-
-       mutex_unlock(&tp->control);
+       if (!rtl_can_wakeup(tp)) {
+               wol->supported = 0;
+               wol->wolopts = 0;
+       } else {
+               mutex_lock(&tp->control);
+               wol->supported = WAKE_ANY;
+               wol->wolopts = __rtl_get_wol(tp);
+               mutex_unlock(&tp->control);
+       }
 
        usb_autopm_put_interface(tp->intf);
 }
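
Wake-on-LAN is now gated on rtl_can_wakeup(): configurations without USB_CONFIG_ATT_WAKEUP report wol->supported = 0 here, the set_wol hunk below refuses with -EOPNOTSUPP, and the probe hunk further down clears any WOL bits the firmware may have left armed on such devices.
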
@@ -3418,6 +3507,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
        struct r8152 *tp = netdev_priv(dev);
        int ret;
 
+       if (!rtl_can_wakeup(tp))
+               return -EOPNOTSUPP;
+
        ret = usb_autopm_get_interface(tp->intf);
        if (ret < 0)
                goto out_set_wol;
@@ -3908,6 +4000,10 @@ static void r8152b_get_version(struct r8152 *tp)
                tp->version = RTL_VER_05;
                tp->mii.supports_gmii = 1;
                break;
+       case 0x5c30:
+               tp->version = RTL_VER_06;
+               tp->mii.supports_gmii = 1;
+               break;
        default:
                netif_info(tp, probe, tp->netdev,
                           "Unknown version 0x%04x\n", version);
@@ -3953,6 +4049,7 @@ static int rtl_ops_init(struct r8152 *tp)
        case RTL_VER_03:
        case RTL_VER_04:
        case RTL_VER_05:
+       case RTL_VER_06:
                ops->init               = r8153_init;
                ops->enable             = rtl8153_enable;
                ops->disable            = rtl8153_disable;
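
Hardware version word 0x5c30 (decoded in the r8152b_get_version() hunk above) becomes the new RTL_VER_06, a gmii-capable part that shares the complete RTL8153 operations set; r8153_init() also grew a matching DYNAMIC_BURST branch for it earlier in this patch.
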
@@ -4059,6 +4156,9 @@ static int rtl8152_probe(struct usb_interface *intf,
                goto out1;
        }
 
+       if (!rtl_can_wakeup(tp))
+               __rtl_set_wol(tp, 0);
+
        tp->saved_wolopts = __rtl_get_wol(tp);
        if (tp->saved_wolopts)
                device_set_wakeup_enable(&udev->dev, true);
@@ -4132,6 +4232,8 @@ static struct usb_driver rtl8152_driver = {
        .suspend =      rtl8152_suspend,
        .resume =       rtl8152_resume,
        .reset_resume = rtl8152_resume,
+       .pre_reset =    rtl8152_pre_reset,
+       .post_reset =   rtl8152_post_reset,
        .supports_autosuspend = 1,
        .disable_hub_initiated_lpm = 1,
 };
index c8186ffda1a314f08cb71b6688c5b120e57c1717..343592c4315f6397d6a2414457a7d10b532c0eca 100644
@@ -290,6 +290,7 @@ static const struct net_device_ops veth_netdev_ops = {
        .ndo_poll_controller    = veth_poll_controller,
 #endif
        .ndo_get_iflink         = veth_get_iflink,
+       .ndo_features_check     = passthru_features_check,
 };
 
 #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO |    \
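
veth now opts out of the core's per-packet feature checks by plugging in the stock passthru_features_check() helper; the default checks exist to protect hardware offload limits (for example around stacked VLAN tags) and can needlessly segment GSO traffic on a purely software pipe. The helper is roughly the following (as provided by net/core/dev.c):

    /* Roughly the core helper being hooked up here: decline to mask
     * any offload features per packet, the right answer for a
     * software device.
     */
    static netdev_features_t passthru_features_check(struct sk_buff *skb,
                                                     struct net_device *dev,
                                                     netdev_features_t features)
    {
            return features;
    }
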
index 63c7810e1545a357eda7578af862ed18322de933..66f08f622dc6603026ceb7646878a6878b637d15 100644
@@ -518,7 +518,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
 
        skb_mark_napi_id(skb, &rq->napi);
 
-       netif_receive_skb(skb);
+       napi_gro_receive(&rq->napi, skb);
        return;
 
 frame_err:
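
Completed receive skbs now go through napi_gro_receive() rather than netif_receive_skb(), letting GRO merge consecutive segments of the same TCP flow into larger skbs before they enter the stack; the skb was already tagged with rq->napi just above, so busy polling keeps working.
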
@@ -756,7 +756,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
        /* Out of packets? */
        if (received < budget) {
                r = virtqueue_enable_cb_prepare(rq->vq);
-               napi_complete(napi);
+               napi_complete_done(napi, received);
                if (unlikely(virtqueue_poll(rq->vq, r)) &&
                    napi_schedule_prep(napi)) {
                        virtqueue_disable_cb(rq->vq);
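
napi_complete_done(napi, received) passes the amount of work actually done to the core, which uses it to decide whether GRO-merged packets may be held back briefly (the per-device gro_flush_timeout) instead of being flushed on every poll; plain napi_complete() conveys no such information.
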
@@ -1828,7 +1828,8 @@ static int virtnet_probe(struct virtio_device *vdev)
        else
                vi->hdr_len = sizeof(struct virtio_net_hdr);
 
-       if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
+       if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
+           virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
                vi->any_header_sg = true;
 
        if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
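
VIRTIO_F_VERSION_1 devices (virtio 1.0) must accept arbitrary descriptor layouts, so the feature implies what legacy devices had to advertise separately as VIRTIO_F_ANY_LAYOUT; either one lets the driver place the virtio-net header and packet data in a single sg element.
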
index 34c519eb1db5092a6e1bd17e02b9a3e53a5c5cb2..e90f7a484e1c741b5b51c04481a3d749bd6ab7bd 100644
 #include <net/ip6_tunnel.h>
 #include <net/ip6_checksum.h>
 #endif
+#include <net/dst_metadata.h>
 
 #define VXLAN_VERSION  "0.1"
 
 #define PORT_HASH_BITS 8
 #define PORT_HASH_SIZE  (1<<PORT_HASH_BITS)
-#define VNI_HASH_BITS  10
-#define VNI_HASH_SIZE  (1<<VNI_HASH_BITS)
-#define FDB_HASH_BITS  8
-#define FDB_HASH_SIZE  (1<<FDB_HASH_BITS)
 #define FDB_AGE_DEFAULT 300 /* 5 min */
 #define FDB_AGE_INTERVAL (10 * HZ)     /* rescan interval */
 
@@ -74,9 +71,13 @@ module_param(log_ecn_error, bool, 0644);
 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 
 static int vxlan_net_id;
+static struct rtnl_link_ops vxlan_link_ops;
 
 static const u8 all_zeros_mac[ETH_ALEN];
 
+static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+                                        bool no_share, u32 flags);
+
 /* per-network namespace private data for this module */
 struct vxlan_net {
        struct list_head  vxlan_list;
@@ -84,21 +85,6 @@ struct vxlan_net {
        spinlock_t        sock_lock;
 };
 
-union vxlan_addr {
-       struct sockaddr_in sin;
-       struct sockaddr_in6 sin6;
-       struct sockaddr sa;
-};
-
-struct vxlan_rdst {
-       union vxlan_addr         remote_ip;
-       __be16                   remote_port;
-       u32                      remote_vni;
-       u32                      remote_ifindex;
-       struct list_head         list;
-       struct rcu_head          rcu;
-};
-
 /* Forwarding table entry */
 struct vxlan_fdb {
        struct hlist_node hlist;        /* linked list of entries */
@@ -106,40 +92,21 @@ struct vxlan_fdb {
        unsigned long     updated;      /* jiffies */
        unsigned long     used;
        struct list_head  remotes;
+       u8                eth_addr[ETH_ALEN];
        u16               state;        /* see ndm_state */
        u8                flags;        /* see ndm_flags */
-       u8                eth_addr[ETH_ALEN];
-};
-
-/* Pseudo network device */
-struct vxlan_dev {
-       struct hlist_node hlist;        /* vni hash table */
-       struct list_head  next;         /* vxlan's per namespace list */
-       struct vxlan_sock *vn_sock;     /* listening socket */
-       struct net_device *dev;
-       struct net        *net;         /* netns for packet i/o */
-       struct vxlan_rdst default_dst;  /* default destination */
-       union vxlan_addr  saddr;        /* source address */
-       __be16            dst_port;
-       __u16             port_min;     /* source port range */
-       __u16             port_max;
-       __u8              tos;          /* TOS override */
-       __u8              ttl;
-       u32               flags;        /* VXLAN_F_* in vxlan.h */
-
-       unsigned long     age_interval;
-       struct timer_list age_timer;
-       spinlock_t        hash_lock;
-       unsigned int      addrcnt;
-       unsigned int      addrmax;
-
-       struct hlist_head fdb_head[FDB_HASH_SIZE];
 };
 
 /* salt for hash table */
 static u32 vxlan_salt __read_mostly;
 static struct workqueue_struct *vxlan_wq;
 
+static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
+{
+       return vs->flags & VXLAN_F_COLLECT_METADATA ||
+              ip_tunnel_collect_metadata();
+}
+
 #if IS_ENABLED(CONFIG_IPV6)
 static inline
 bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
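
The definitions dropped here (union vxlan_addr, struct vxlan_rdst, the vxlan_dev private struct) and the VNI/FDB hash constants move into include/net/vxlan.h, so the new vxlan_config/vxlan_dev_create() API introduced later in this patch can be shared with other kernel users. The new vxlan_collect_metadata() helper enables metadata collection either per socket (VXLAN_F_COLLECT_METADATA) or globally through the ip_tunnel_collect_metadata() static key.
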
@@ -345,7 +312,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
        if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
                goto nla_put_failure;
 
-       if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
+       if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port &&
            nla_put_be16(skb, NDA_PORT, rdst->remote_port))
                goto nla_put_failure;
        if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
@@ -749,7 +716,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
                if (!(flags & NLM_F_CREATE))
                        return -ENOENT;
 
-               if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
+               if (vxlan->cfg.addrmax &&
+                   vxlan->addrcnt >= vxlan->cfg.addrmax)
                        return -ENOSPC;
 
                /* Disallow replace to add a multicast entry */
@@ -835,7 +803,7 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
                        return -EINVAL;
                *port = nla_get_be16(tb[NDA_PORT]);
        } else {
-               *port = vxlan->dst_port;
+               *port = vxlan->cfg.dst_port;
        }
 
        if (tb[NDA_VNI]) {
@@ -1021,7 +989,7 @@ static bool vxlan_snoop(struct net_device *dev,
                        vxlan_fdb_create(vxlan, src_mac, src_ip,
                                         NUD_REACHABLE,
                                         NLM_F_EXCL|NLM_F_CREATE,
-                                        vxlan->dst_port,
+                                        vxlan->cfg.dst_port,
                                         vxlan->default_dst.remote_vni,
                                         0, NTF_SELF);
                spin_unlock(&vxlan->hash_lock);
@@ -1062,7 +1030,7 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
        return false;
 }
 
-void vxlan_sock_release(struct vxlan_sock *vs)
+static void vxlan_sock_release(struct vxlan_sock *vs)
 {
        struct sock *sk = vs->sock->sk;
        struct net *net = sock_net(sk);
@@ -1078,7 +1046,6 @@ void vxlan_sock_release(struct vxlan_sock *vs)
 
        queue_work(vxlan_wq, &vs->del_work);
 }
-EXPORT_SYMBOL_GPL(vxlan_sock_release);
 
 /* Update multicast group membership when first VNI on
  * multicast address is brought up
@@ -1161,13 +1128,112 @@ static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
        return vh;
 }
 
+static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
+                     struct vxlan_metadata *md, u32 vni,
+                     struct metadata_dst *tun_dst)
+{
+       struct iphdr *oip = NULL;
+       struct ipv6hdr *oip6 = NULL;
+       struct vxlan_dev *vxlan;
+       struct pcpu_sw_netstats *stats;
+       union vxlan_addr saddr;
+       int err = 0;
+       union vxlan_addr *remote_ip;
+
+       /* For flow based devices, map all packets to VNI 0 */
+       if (vs->flags & VXLAN_F_FLOW_BASED)
+               vni = 0;
+
+       /* Is this VNI defined? */
+       vxlan = vxlan_vs_find_vni(vs, vni);
+       if (!vxlan)
+               goto drop;
+
+       remote_ip = &vxlan->default_dst.remote_ip;
+       skb_reset_mac_header(skb);
+       skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
+       skb->protocol = eth_type_trans(skb, vxlan->dev);
+       skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+
+       /* Ignore packet loops (and multicast echo) */
+       if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
+               goto drop;
+
+       /* Re-examine inner Ethernet packet */
+       if (remote_ip->sa.sa_family == AF_INET) {
+               oip = ip_hdr(skb);
+               saddr.sin.sin_addr.s_addr = oip->saddr;
+               saddr.sa.sa_family = AF_INET;
+#if IS_ENABLED(CONFIG_IPV6)
+       } else {
+               oip6 = ipv6_hdr(skb);
+               saddr.sin6.sin6_addr = oip6->saddr;
+               saddr.sa.sa_family = AF_INET6;
+#endif
+       }
+
+       if (tun_dst) {
+               skb_dst_set(skb, (struct dst_entry *)tun_dst);
+               tun_dst = NULL;
+       }
+
+       if ((vxlan->flags & VXLAN_F_LEARN) &&
+           vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
+               goto drop;
+
+       skb_reset_network_header(skb);
+       /* In flow-based mode, GBP is carried in dst_metadata */
+       if (!(vs->flags & VXLAN_F_FLOW_BASED))
+               skb->mark = md->gbp;
+
+       if (oip6)
+               err = IP6_ECN_decapsulate(oip6, skb);
+       if (oip)
+               err = IP_ECN_decapsulate(oip, skb);
+
+       if (unlikely(err)) {
+               if (log_ecn_error) {
+                       if (oip6)
+                               net_info_ratelimited("non-ECT from %pI6\n",
+                                                    &oip6->saddr);
+                       if (oip)
+                               net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+                                                    &oip->saddr, oip->tos);
+               }
+               if (err > 1) {
+                       ++vxlan->dev->stats.rx_frame_errors;
+                       ++vxlan->dev->stats.rx_errors;
+                       goto drop;
+               }
+       }
+
+       stats = this_cpu_ptr(vxlan->dev->tstats);
+       u64_stats_update_begin(&stats->syncp);
+       stats->rx_packets++;
+       stats->rx_bytes += skb->len;
+       u64_stats_update_end(&stats->syncp);
+
+       netif_rx(skb);
+
+       return;
+drop:
+       if (tun_dst)
+               dst_release((struct dst_entry *)tun_dst);
+
+       /* Consume bad packet */
+       kfree_skb(skb);
+}
+
 /* Callback from net/ipv4/udp.c to receive packets */
 static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 {
+       struct metadata_dst *tun_dst = NULL;
+       struct ip_tunnel_info *info;
        struct vxlan_sock *vs;
        struct vxlanhdr *vxh;
        u32 flags, vni;
-       struct vxlan_metadata md = {0};
+       struct vxlan_metadata _md;
+       struct vxlan_metadata *md = &_md;
 
        /* Need Vxlan and inner Ethernet header to be present */
        if (!pskb_may_pull(skb, VXLAN_HLEN))
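
vxlan_rcv() is reworked to be called straight from the UDP encap handler with the parsed VNI and an optional metadata_dst, replacing the per-socket rcv hook deleted later in this patch; flow-based sockets fold every packet onto VNI 0 and hand the tunnel key to the stack by attaching tun_dst with skb_dst_set(). A hypothetical consumer of that key (not in the patch; the two-argument skb_tunnel_info() form matches its use elsewhere in this series):

    /* Sketch: read the receive-side tunnel key attached above. */
    static __be64 sketch_rx_vni(struct sk_buff *skb)
    {
            struct ip_tunnel_info *info = skb_tunnel_info(skb, AF_INET);

            if (!info || info->mode != IP_TUNNEL_INFO_RX)
                    return 0;
            return info->key.tun_id;        /* VNI, network byte order */
    }
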
@@ -1202,6 +1268,32 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                vni &= VXLAN_VNI_MASK;
        }
 
+       if (vxlan_collect_metadata(vs)) {
+               const struct iphdr *iph = ip_hdr(skb);
+
+               tun_dst = metadata_dst_alloc(sizeof(*md), GFP_ATOMIC);
+               if (!tun_dst)
+                       goto drop;
+
+               info = &tun_dst->u.tun_info;
+               info->key.ipv4_src = iph->saddr;
+               info->key.ipv4_dst = iph->daddr;
+               info->key.ipv4_tos = iph->tos;
+               info->key.ipv4_ttl = iph->ttl;
+               info->key.tp_src = udp_hdr(skb)->source;
+               info->key.tp_dst = udp_hdr(skb)->dest;
+
+               info->mode = IP_TUNNEL_INFO_RX;
+               info->key.tun_flags = TUNNEL_KEY;
+               info->key.tun_id = cpu_to_be64(vni >> 8);
+               if (udp_hdr(skb)->check != 0)
+                       info->key.tun_flags |= TUNNEL_CSUM;
+
+               md = ip_tunnel_info_opts(info, sizeof(*md));
+       } else {
+               memset(md, 0, sizeof(*md));
+       }
+
        /* For backwards compatibility, only allow reserved fields to be
         * used by VXLAN extensions if explicitly requested.
         */
@@ -1209,13 +1301,16 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                struct vxlanhdr_gbp *gbp;
 
                gbp = (struct vxlanhdr_gbp *)vxh;
-               md.gbp = ntohs(gbp->policy_id);
+               md->gbp = ntohs(gbp->policy_id);
+
+               if (tun_dst)
+                       info->key.tun_flags |= TUNNEL_VXLAN_OPT;
 
                if (gbp->dont_learn)
-                       md.gbp |= VXLAN_GBP_DONT_LEARN;
+                       md->gbp |= VXLAN_GBP_DONT_LEARN;
 
                if (gbp->policy_applied)
-                       md.gbp |= VXLAN_GBP_POLICY_APPLIED;
+                       md->gbp |= VXLAN_GBP_POLICY_APPLIED;
 
                flags &= ~VXLAN_GBP_USED_BITS;
        }
@@ -1233,8 +1328,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                goto bad_flags;
        }
 
-       md.vni = vxh->vx_vni;
-       vs->rcv(vs, skb, &md);
+       vxlan_rcv(vs, skb, md, vni >> 8, tun_dst);
        return 0;
 
 drop:
@@ -1247,93 +1341,13 @@ bad_flags:
                   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
 
 error:
+       if (tun_dst)
+               dst_release((struct dst_entry *)tun_dst);
+
        /* Return non vxlan pkt */
        return 1;
 }
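When metadata collection is on, the encap handler above allocates a metadata_dst per packet and fills its ip_tunnel_info from the outer headers: IPv4 source/destination, TOS and TTL, the UDP port pair, TUNNEL_KEY plus the VNI as tun_id, and TUNNEL_CSUM when the outer UDP checksum was non-zero. A negotiated GBP header lands in the info's option space and is flagged TUNNEL_VXLAN_OPT, so collectors see the policy bits alongside the key rather than only via skb->mark; every error path releases tun_dst before dropping the packet.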
 
-static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
-                     struct vxlan_metadata *md)
-{
-       struct iphdr *oip = NULL;
-       struct ipv6hdr *oip6 = NULL;
-       struct vxlan_dev *vxlan;
-       struct pcpu_sw_netstats *stats;
-       union vxlan_addr saddr;
-       __u32 vni;
-       int err = 0;
-       union vxlan_addr *remote_ip;
-
-       vni = ntohl(md->vni) >> 8;
-       /* Is this VNI defined? */
-       vxlan = vxlan_vs_find_vni(vs, vni);
-       if (!vxlan)
-               goto drop;
-
-       remote_ip = &vxlan->default_dst.remote_ip;
-       skb_reset_mac_header(skb);
-       skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
-       skb->protocol = eth_type_trans(skb, vxlan->dev);
-       skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
-
-       /* Ignore packet loops (and multicast echo) */
-       if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
-               goto drop;
-
-       /* Re-examine inner Ethernet packet */
-       if (remote_ip->sa.sa_family == AF_INET) {
-               oip = ip_hdr(skb);
-               saddr.sin.sin_addr.s_addr = oip->saddr;
-               saddr.sa.sa_family = AF_INET;
-#if IS_ENABLED(CONFIG_IPV6)
-       } else {
-               oip6 = ipv6_hdr(skb);
-               saddr.sin6.sin6_addr = oip6->saddr;
-               saddr.sa.sa_family = AF_INET6;
-#endif
-       }
-
-       if ((vxlan->flags & VXLAN_F_LEARN) &&
-           vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
-               goto drop;
-
-       skb_reset_network_header(skb);
-       skb->mark = md->gbp;
-
-       if (oip6)
-               err = IP6_ECN_decapsulate(oip6, skb);
-       if (oip)
-               err = IP_ECN_decapsulate(oip, skb);
-
-       if (unlikely(err)) {
-               if (log_ecn_error) {
-                       if (oip6)
-                               net_info_ratelimited("non-ECT from %pI6\n",
-                                                    &oip6->saddr);
-                       if (oip)
-                               net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
-                                                    &oip->saddr, oip->tos);
-               }
-               if (err > 1) {
-                       ++vxlan->dev->stats.rx_frame_errors;
-                       ++vxlan->dev->stats.rx_errors;
-                       goto drop;
-               }
-       }
-
-       stats = this_cpu_ptr(vxlan->dev->tstats);
-       u64_stats_update_begin(&stats->syncp);
-       stats->rx_packets++;
-       stats->rx_bytes += skb->len;
-       u64_stats_update_end(&stats->syncp);
-
-       netif_rx(skb);
-
-       return;
-drop:
-       /* Consume bad packet */
-       kfree_skb(skb);
-}
-
 static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -1672,7 +1686,7 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
                           struct sk_buff *skb,
                           struct net_device *dev, struct in6_addr *saddr,
                           struct in6_addr *daddr, __u8 prio, __u8 ttl,
-                          __be16 src_port, __be16 dst_port,
+                          __be16 src_port, __be16 dst_port, __be32 vni,
                           struct vxlan_metadata *md, bool xnet, u32 vxflags)
 {
        struct vxlanhdr *vxh;
@@ -1722,7 +1736,7 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
 
        vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
        vxh->vx_flags = htonl(VXLAN_HF_VNI);
-       vxh->vx_vni = md->vni;
+       vxh->vx_vni = vni;
 
        if (type & SKB_GSO_TUNNEL_REMCSUM) {
                u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
@@ -1755,10 +1769,10 @@ err:
 }
 #endif
 
-int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
-                  __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
-                  __be16 src_port, __be16 dst_port,
-                  struct vxlan_metadata *md, bool xnet, u32 vxflags)
+static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
+                         __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
+                         __be16 src_port, __be16 dst_port, __be32 vni,
+                         struct vxlan_metadata *md, bool xnet, u32 vxflags)
 {
        struct vxlanhdr *vxh;
        int min_headroom;
@@ -1801,7 +1815,7 @@ int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
 
        vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
        vxh->vx_flags = htonl(VXLAN_HF_VNI);
-       vxh->vx_vni = md->vni;
+       vxh->vx_vni = vni;
 
        if (type & SKB_GSO_TUNNEL_REMCSUM) {
                u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
@@ -1828,7 +1842,6 @@ int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
                                   ttl, df, src_port, dst_port, xnet,
                                   !(vxflags & VXLAN_F_UDP_CSUM));
 }
-EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
 
 /* Bypass encapsulation if the destination is local */
 static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
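
As the two hunks above show, vxlan_xmit_skb() and vxlan6_xmit_skb() stop being exported and take the VNI as an explicit __be32 argument, leaving struct vxlan_metadata carrying only the GBP word; external encapsulators are expected to go through a regular vxlan net_device with transmit tunnel metadata attached instead of calling the xmit helpers directly.
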
@@ -1878,22 +1891,43 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
 static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                           struct vxlan_rdst *rdst, bool did_rsc)
 {
+       struct ip_tunnel_info *info;
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct sock *sk = vxlan->vn_sock->sock->sk;
        struct rtable *rt = NULL;
        const struct iphdr *old_iph;
        struct flowi4 fl4;
        union vxlan_addr *dst;
-       struct vxlan_metadata md;
+       union vxlan_addr remote_ip;
+       struct vxlan_metadata _md;
+       struct vxlan_metadata *md = &_md;
        __be16 src_port = 0, dst_port;
        u32 vni;
        __be16 df = 0;
        __u8 tos, ttl;
        int err;
+       u32 flags = vxlan->flags;
+
+       /* FIXME: Support IPv6 */
+       info = skb_tunnel_info(skb, AF_INET);
+
+       if (rdst) {
+               dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
+               vni = rdst->remote_vni;
+               dst = &rdst->remote_ip;
+       } else {
+               if (!info) {
+                       WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
+                                 dev->name);
+                       goto drop;
+               }
 
-       dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
-       vni = rdst->remote_vni;
-       dst = &rdst->remote_ip;
+               dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
+               vni = be64_to_cpu(info->key.tun_id);
+               remote_ip.sin.sin_family = AF_INET;
+               remote_ip.sin.sin_addr.s_addr = info->key.ipv4_dst;
+               dst = &remote_ip;
+       }
 
        if (vxlan_addr_any(dst)) {
                if (did_rsc) {
@@ -1906,25 +1940,42 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 
        old_iph = ip_hdr(skb);
 
-       ttl = vxlan->ttl;
+       ttl = vxlan->cfg.ttl;
        if (!ttl && vxlan_addr_multicast(dst))
                ttl = 1;
 
-       tos = vxlan->tos;
+       tos = vxlan->cfg.tos;
        if (tos == 1)
                tos = ip_tunnel_get_dsfield(old_iph, skb);
 
-       src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->port_min,
-                                    vxlan->port_max, true);
+       src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
+                                    vxlan->cfg.port_max, true);
 
        if (dst->sa.sa_family == AF_INET) {
+               if (info) {
+                       if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
+                               df = htons(IP_DF);
+                       if (info->key.tun_flags & TUNNEL_CSUM)
+                               flags |= VXLAN_F_UDP_CSUM;
+                       else
+                               flags &= ~VXLAN_F_UDP_CSUM;
+
+                       ttl = info->key.ipv4_ttl;
+                       tos = info->key.ipv4_tos;
+
+                       if (info->options_len)
+                               md = ip_tunnel_info_opts(info, sizeof(*md));
+               } else {
+                       md->gbp = skb->mark;
+               }
+
                memset(&fl4, 0, sizeof(fl4));
-               fl4.flowi4_oif = rdst->remote_ifindex;
+               fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
                fl4.flowi4_tos = RT_TOS(tos);
                fl4.flowi4_mark = skb->mark;
                fl4.flowi4_proto = IPPROTO_UDP;
                fl4.daddr = dst->sin.sin_addr.s_addr;
-               fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
+               fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;
 
                rt = ip_route_output_key(vxlan->net, &fl4);
                if (IS_ERR(rt)) {
@@ -1958,14 +2009,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 
                tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
                ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
-               md.vni = htonl(vni << 8);
-               md.gbp = skb->mark;
-
                err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr,
                                     dst->sin.sin_addr.s_addr, tos, ttl, df,
-                                    src_port, dst_port, &md,
+                                    src_port, dst_port, htonl(vni << 8), md,
                                     !net_eq(vxlan->net, dev_net(vxlan->dev)),
-                                    vxlan->flags);
+                                    flags);
                if (err < 0) {
                        /* skb is already freed. */
                        skb = NULL;
@@ -1980,13 +2028,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                u32 flags;
 
                memset(&fl6, 0, sizeof(fl6));
-               fl6.flowi6_oif = rdst->remote_ifindex;
+               fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0;
                fl6.daddr = dst->sin6.sin6_addr;
-               fl6.saddr = vxlan->saddr.sin6.sin6_addr;
+               fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
                fl6.flowi6_mark = skb->mark;
                fl6.flowi6_proto = IPPROTO_UDP;
 
-               if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) {
+               if (ipv6_stub->ipv6_dst_lookup(vxlan->net, sk, &ndst, &fl6)) {
                        netdev_dbg(dev, "no route to %pI6\n",
                                   &dst->sin6.sin6_addr);
                        dev->stats.tx_carrier_errors++;
@@ -2018,11 +2066,10 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                }
 
                ttl = ttl ? : ip6_dst_hoplimit(ndst);
-               md.vni = htonl(vni << 8);
-               md.gbp = skb->mark;
+               md->gbp = skb->mark;
 
                err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr,
-                                     0, ttl, src_port, dst_port, &md,
+                                     0, ttl, src_port, dst_port, htonl(vni << 8), md,
                                      !net_eq(vxlan->net, dev_net(vxlan->dev)),
                                      vxlan->flags);
 #endif
@@ -2051,11 +2098,15 @@ tx_free:
 static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
+       const struct ip_tunnel_info *info;
        struct ethhdr *eth;
        bool did_rsc = false;
        struct vxlan_rdst *rdst, *fdst = NULL;
        struct vxlan_fdb *f;
 
+       /* FIXME: Support IPv6 */
+       info = skb_tunnel_info(skb, AF_INET);
+
        skb_reset_mac_header(skb);
        eth = eth_hdr(skb);
 
@@ -2078,6 +2129,12 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
 #endif
        }
 
+       if (vxlan->flags & VXLAN_F_FLOW_BASED &&
+           info && info->mode == IP_TUNNEL_INFO_TX) {
+               vxlan_xmit_one(skb, dev, NULL, false);
+               return NETDEV_TX_OK;
+       }
+
        f = vxlan_find_mac(vxlan, eth->h_dest);
        did_rsc = false;
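
With VXLAN_F_FLOW_BASED set, a packet carrying transmit metadata (IP_TUNNEL_INFO_TX) bypasses the FDB entirely: vxlan_xmit_one() runs with rdst == NULL and pulls destination address, port, VNI, TTL/TOS and checksum flags out of the attached ip_tunnel_info, as its hunk earlier in this patch shows. A hypothetical sender attaching such metadata (field names follow the keys read there; error handling trimmed):

    /* Sketch: have a flow-based vxlan device encapsulate this skb
     * to 192.0.2.1 with VNI 42. Hypothetical caller, not in the patch.
     */
    static int sketch_set_tx_tunnel(struct sk_buff *skb)
    {
            struct metadata_dst *tun_dst;
            struct ip_tunnel_info *info;

            tun_dst = metadata_dst_alloc(0, GFP_ATOMIC);
            if (!tun_dst)
                    return -ENOMEM;

            info = &tun_dst->u.tun_info;
            info->mode = IP_TUNNEL_INFO_TX;
            info->key.tun_flags = TUNNEL_KEY;
            info->key.tun_id = cpu_to_be64(42);
            info->key.ipv4_dst = htonl(0xc0000201);  /* 192.0.2.1 */

            skb_dst_drop(skb);
            skb_dst_set(skb, (struct dst_entry *)tun_dst);
            return 0;
    }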
 
@@ -2143,7 +2200,7 @@ static void vxlan_cleanup(unsigned long arg)
                        if (f->state & NUD_PERMANENT)
                                continue;
 
-                       timeout = f->used + vxlan->age_interval * HZ;
+                       timeout = f->used + vxlan->cfg.age_interval * HZ;
                        if (time_before_eq(timeout, jiffies)) {
                                netdev_dbg(vxlan->dev,
                                           "garbage collect %pM\n",
@@ -2207,8 +2264,8 @@ static int vxlan_open(struct net_device *dev)
        struct vxlan_sock *vs;
        int ret = 0;
 
-       vs = vxlan_sock_add(vxlan->net, vxlan->dst_port, vxlan_rcv, NULL,
-                           false, vxlan->flags);
+       vs = vxlan_sock_add(vxlan->net, vxlan->cfg.dst_port,
+                           vxlan->cfg.no_share, vxlan->flags);
        if (IS_ERR(vs))
                return PTR_ERR(vs);
 
@@ -2222,7 +2279,7 @@ static int vxlan_open(struct net_device *dev)
                }
        }
 
-       if (vxlan->age_interval)
+       if (vxlan->cfg.age_interval)
                mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
 
        return ret;
@@ -2380,7 +2437,7 @@ static void vxlan_setup(struct net_device *dev)
        vxlan->age_timer.function = vxlan_cleanup;
        vxlan->age_timer.data = (unsigned long) vxlan;
 
-       vxlan->dst_port = htons(vxlan_port);
+       vxlan->cfg.dst_port = htons(vxlan_port);
 
        vxlan->dev = dev;
 
@@ -2405,6 +2462,8 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
        [IFLA_VXLAN_RSC]        = { .type = NLA_U8 },
        [IFLA_VXLAN_L2MISS]     = { .type = NLA_U8 },
        [IFLA_VXLAN_L3MISS]     = { .type = NLA_U8 },
+       [IFLA_VXLAN_FLOWBASED]  = { .type = NLA_U8 },
+       [IFLA_VXLAN_COLLECT_METADATA]   = { .type = NLA_U8 },
        [IFLA_VXLAN_PORT]       = { .type = NLA_U16 },
        [IFLA_VXLAN_UDP_CSUM]   = { .type = NLA_U8 },
        [IFLA_VXLAN_UDP_ZERO_CSUM6_TX]  = { .type = NLA_U8 },
@@ -2500,7 +2559,6 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
 
 /* Create new listen socket if needed */
 static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
-                                             vxlan_rcv_t *rcv, void *data,
                                              u32 flags)
 {
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
@@ -2529,8 +2587,6 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
 
        vs->sock = sock;
        atomic_set(&vs->refcnt, 1);
-       vs->rcv = rcv;
-       vs->data = data;
        vs->flags = (flags & VXLAN_F_RCV_FLAGS);
 
        /* Initialize the vxlan udp offloads structure */
@@ -2554,9 +2610,8 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
        return vs;
 }
 
-struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
-                                 vxlan_rcv_t *rcv, void *data,
-                                 bool no_share, u32 flags)
+static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+                                        bool no_share, u32 flags)
 {
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        struct vxlan_sock *vs;
@@ -2566,7 +2621,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
                spin_lock(&vn->sock_lock);
                vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port,
                                     flags);
-               if (vs && vs->rcv == rcv) {
+               if (vs) {
                        if (!atomic_add_unless(&vs->refcnt, 1, 0))
                                vs = ERR_PTR(-EBUSY);
                        spin_unlock(&vn->sock_lock);
@@ -2575,58 +2630,38 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
                spin_unlock(&vn->sock_lock);
        }
 
-       return vxlan_socket_create(net, port, rcv, data, flags);
+       return vxlan_socket_create(net, port, flags);
 }
-EXPORT_SYMBOL_GPL(vxlan_sock_add);
 
-static int vxlan_newlink(struct net *src_net, struct net_device *dev,
-                        struct nlattr *tb[], struct nlattr *data[])
+static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
+                              struct vxlan_config *conf)
 {
        struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_rdst *dst = &vxlan->default_dst;
-       __u32 vni;
        int err;
        bool use_ipv6 = false;
-
-       if (!data[IFLA_VXLAN_ID])
-               return -EINVAL;
+       __be16 default_port = vxlan->cfg.dst_port;
 
        vxlan->net = src_net;
 
-       vni = nla_get_u32(data[IFLA_VXLAN_ID]);
-       dst->remote_vni = vni;
-
-       /* Unless IPv6 is explicitly requested, assume IPv4 */
-       dst->remote_ip.sa.sa_family = AF_INET;
-       if (data[IFLA_VXLAN_GROUP]) {
-               dst->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
-       } else if (data[IFLA_VXLAN_GROUP6]) {
-               if (!IS_ENABLED(CONFIG_IPV6))
-                       return -EPFNOSUPPORT;
+       dst->remote_vni = conf->vni;
 
-               dst->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
-               dst->remote_ip.sa.sa_family = AF_INET6;
-               use_ipv6 = true;
-       }
+       memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));
 
-       if (data[IFLA_VXLAN_LOCAL]) {
-               vxlan->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
-               vxlan->saddr.sa.sa_family = AF_INET;
-       } else if (data[IFLA_VXLAN_LOCAL6]) {
-               if (!IS_ENABLED(CONFIG_IPV6))
-                       return -EPFNOSUPPORT;
+       /* Unless IPv6 is explicitly requested, assume IPv4 */
+       if (!dst->remote_ip.sa.sa_family)
+               dst->remote_ip.sa.sa_family = AF_INET;
 
-               /* TODO: respect scope id */
-               vxlan->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
-               vxlan->saddr.sa.sa_family = AF_INET6;
+       if (dst->remote_ip.sa.sa_family == AF_INET6 ||
+           vxlan->cfg.saddr.sa.sa_family == AF_INET6)
                use_ipv6 = true;
-       }
 
-       if (data[IFLA_VXLAN_LINK] &&
-           (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
+       if (conf->remote_ifindex) {
                struct net_device *lowerdev
-                        = __dev_get_by_index(src_net, dst->remote_ifindex);
+                        = __dev_get_by_index(src_net, conf->remote_ifindex);
+
+               dst->remote_ifindex = conf->remote_ifindex;
 
                if (!lowerdev) {
                        pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
@@ -2644,7 +2679,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
                }
 #endif
 
-               if (!tb[IFLA_MTU])
+               if (!conf->mtu)
                        dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
 
                dev->needed_headroom = lowerdev->hard_header_len +
@@ -2652,101 +2687,192 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
        } else if (use_ipv6)
                vxlan->flags |= VXLAN_F_IPV6;
 
+       memcpy(&vxlan->cfg, conf, sizeof(*conf));
+       if (!vxlan->cfg.dst_port)
+               vxlan->cfg.dst_port = default_port;
+       vxlan->flags |= conf->flags;
+
+       if (!vxlan->cfg.age_interval)
+               vxlan->cfg.age_interval = FDB_AGE_DEFAULT;
+
+       if (vxlan_find_vni(src_net, conf->vni, use_ipv6 ? AF_INET6 : AF_INET,
+                          vxlan->cfg.dst_port, vxlan->flags))
+               return -EEXIST;
+
+       dev->ethtool_ops = &vxlan_ethtool_ops;
+
+       /* create an fdb entry for a valid default destination */
+       if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
+               err = vxlan_fdb_create(vxlan, all_zeros_mac,
+                                      &vxlan->default_dst.remote_ip,
+                                      NUD_REACHABLE|NUD_PERMANENT,
+                                      NLM_F_EXCL|NLM_F_CREATE,
+                                      vxlan->cfg.dst_port,
+                                      vxlan->default_dst.remote_vni,
+                                      vxlan->default_dst.remote_ifindex,
+                                      NTF_SELF);
+               if (err)
+                       return err;
+       }
+
+       err = register_netdevice(dev);
+       if (err) {
+               vxlan_fdb_delete_default(vxlan);
+               return err;
+       }
+
+       list_add(&vxlan->next, &vn->vxlan_list);
+
+       return 0;
+}
+
+struct net_device *vxlan_dev_create(struct net *net, const char *name,
+                                   u8 name_assign_type, struct vxlan_config *conf)
+{
+       struct nlattr *tb[IFLA_MAX+1];
+       struct net_device *dev;
+       int err;
+
+       memset(&tb, 0, sizeof(tb));
+
+       dev = rtnl_create_link(net, name, name_assign_type,
+                              &vxlan_link_ops, tb);
+       if (IS_ERR(dev))
+               return dev;
+
+       err = vxlan_dev_configure(net, dev, conf);
+       if (err < 0) {
+               free_netdev(dev);
+               return ERR_PTR(err);
+       }
+
+       return dev;
+}
+EXPORT_SYMBOL_GPL(vxlan_dev_create);
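
vxlan_dev_create() is the new in-kernel entry point for creating a VXLAN device from a vxlan_config, bypassing rtnetlink attribute parsing; openvswitch is the intended consumer in this series. A minimal sketch using only fields visible in this patch (the device name is arbitrary, and the caller is assumed to hold the RTNL lock, as rtnl_create_link()/register_netdevice() require):

    /* Sketch: create a metadata-driven vxlan device from kernel code. */
    static struct net_device *sketch_vxlan_create(struct net *net)
    {
            struct vxlan_config conf;

            memset(&conf, 0, sizeof(conf));
            conf.vni = 0;                    /* flow-based: per-skb VNI */
            conf.dst_port = htons(4789);     /* IANA VXLAN port */
            conf.flags = VXLAN_F_FLOW_BASED | VXLAN_F_COLLECT_METADATA;

            return vxlan_dev_create(net, "vxlan_sys", NET_NAME_USER, &conf);
    }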
+
+static int vxlan_newlink(struct net *src_net, struct net_device *dev,
+                        struct nlattr *tb[], struct nlattr *data[])
+{
+       struct vxlan_config conf;
+       int err;
+
+       if (!data[IFLA_VXLAN_ID])
+               return -EINVAL;
+
+       memset(&conf, 0, sizeof(conf));
+       conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]);
+
+       if (data[IFLA_VXLAN_GROUP]) {
+               conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
+       } else if (data[IFLA_VXLAN_GROUP6]) {
+               if (!IS_ENABLED(CONFIG_IPV6))
+                       return -EPFNOSUPPORT;
+
+               conf.remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
+               conf.remote_ip.sa.sa_family = AF_INET6;
+       }
+
+       if (data[IFLA_VXLAN_LOCAL]) {
+               conf.saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
+               conf.saddr.sa.sa_family = AF_INET;
+       } else if (data[IFLA_VXLAN_LOCAL6]) {
+               if (!IS_ENABLED(CONFIG_IPV6))
+                       return -EPFNOSUPPORT;
+
+               /* TODO: respect scope id */
+               conf.saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
+               conf.saddr.sa.sa_family = AF_INET6;
+       }
+
+       if (data[IFLA_VXLAN_LINK])
+               conf.remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);
+
        if (data[IFLA_VXLAN_TOS])
-               vxlan->tos  = nla_get_u8(data[IFLA_VXLAN_TOS]);
+               conf.tos  = nla_get_u8(data[IFLA_VXLAN_TOS]);
 
        if (data[IFLA_VXLAN_TTL])
-               vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
+               conf.ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
 
        if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
-               vxlan->flags |= VXLAN_F_LEARN;
+               conf.flags |= VXLAN_F_LEARN;
 
        if (data[IFLA_VXLAN_AGEING])
-               vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
-       else
-               vxlan->age_interval = FDB_AGE_DEFAULT;
+               conf.age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
 
        if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
-               vxlan->flags |= VXLAN_F_PROXY;
+               conf.flags |= VXLAN_F_PROXY;
 
        if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
-               vxlan->flags |= VXLAN_F_RSC;
+               conf.flags |= VXLAN_F_RSC;
 
        if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
-               vxlan->flags |= VXLAN_F_L2MISS;
+               conf.flags |= VXLAN_F_L2MISS;
 
        if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
-               vxlan->flags |= VXLAN_F_L3MISS;
+               conf.flags |= VXLAN_F_L3MISS;
 
        if (data[IFLA_VXLAN_LIMIT])
-               vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
+               conf.addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
+
+       if (data[IFLA_VXLAN_FLOWBASED] &&
+           nla_get_u8(data[IFLA_VXLAN_FLOWBASED]))
+               conf.flags |= VXLAN_F_FLOW_BASED;
+
+       if (data[IFLA_VXLAN_COLLECT_METADATA] &&
+           nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
+               conf.flags |= VXLAN_F_COLLECT_METADATA;
 
        if (data[IFLA_VXLAN_PORT_RANGE]) {
                const struct ifla_vxlan_port_range *p
                        = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
-               vxlan->port_min = ntohs(p->low);
-               vxlan->port_max = ntohs(p->high);
+               conf.port_min = ntohs(p->low);
+               conf.port_max = ntohs(p->high);
        }
 
        if (data[IFLA_VXLAN_PORT])
-               vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
+               conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
 
        if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
-               vxlan->flags |= VXLAN_F_UDP_CSUM;
+               conf.flags |= VXLAN_F_UDP_CSUM;
 
        if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
            nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
-               vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
+               conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
 
        if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
            nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
-               vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
+               conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
 
        if (data[IFLA_VXLAN_REMCSUM_TX] &&
            nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
-               vxlan->flags |= VXLAN_F_REMCSUM_TX;
+               conf.flags |= VXLAN_F_REMCSUM_TX;
 
        if (data[IFLA_VXLAN_REMCSUM_RX] &&
            nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
-               vxlan->flags |= VXLAN_F_REMCSUM_RX;
+               conf.flags |= VXLAN_F_REMCSUM_RX;
 
        if (data[IFLA_VXLAN_GBP])
-               vxlan->flags |= VXLAN_F_GBP;
+               conf.flags |= VXLAN_F_GBP;
 
        if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
-               vxlan->flags |= VXLAN_F_REMCSUM_NOPARTIAL;
-
-       if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
-                          vxlan->dst_port, vxlan->flags)) {
-               pr_info("duplicate VNI %u\n", vni);
-               return -EEXIST;
-       }
+               conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
 
-       dev->ethtool_ops = &vxlan_ethtool_ops;
+       err = vxlan_dev_configure(src_net, dev, &conf);
+       switch (err) {
+       case -ENODEV:
+               pr_info("ifindex %d does not exist\n", conf.remote_ifindex);
+               break;
 
-       /* create an fdb entry for a valid default destination */
-       if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
-               err = vxlan_fdb_create(vxlan, all_zeros_mac,
-                                      &vxlan->default_dst.remote_ip,
-                                      NUD_REACHABLE|NUD_PERMANENT,
-                                      NLM_F_EXCL|NLM_F_CREATE,
-                                      vxlan->dst_port,
-                                      vxlan->default_dst.remote_vni,
-                                      vxlan->default_dst.remote_ifindex,
-                                      NTF_SELF);
-               if (err)
-                       return err;
-       }
+       case -EPERM:
+               pr_info("IPv6 is disabled via sysctl\n");
+               break;
 
-       err = register_netdevice(dev);
-       if (err) {
-               vxlan_fdb_delete_default(vxlan);
-               return err;
+       case -EEXIST:
+               pr_info("duplicate VNI %u\n", conf.vni);
+               break;
        }
 
-       list_add(&vxlan->next, &vn->vxlan_list);
-
-       return 0;
+       return err;
 }
 
 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
@@ -2777,6 +2903,7 @@ static size_t vxlan_get_size(const struct net_device *dev)
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_RSC */
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_L2MISS */
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_L3MISS */
+               nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_FLOWBASED */
                nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
                nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
                nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
@@ -2794,8 +2921,8 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
        const struct vxlan_dev *vxlan = netdev_priv(dev);
        const struct vxlan_rdst *dst = &vxlan->default_dst;
        struct ifla_vxlan_port_range ports = {
-               .low =  htons(vxlan->port_min),
-               .high = htons(vxlan->port_max),
+               .low =  htons(vxlan->cfg.port_min),
+               .high = htons(vxlan->cfg.port_max),
        };
 
        if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
@@ -2818,22 +2945,22 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
        if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
                goto nla_put_failure;
 
-       if (!vxlan_addr_any(&vxlan->saddr)) {
-               if (vxlan->saddr.sa.sa_family == AF_INET) {
+       if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
+               if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
                        if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
-                                           vxlan->saddr.sin.sin_addr.s_addr))
+                                           vxlan->cfg.saddr.sin.sin_addr.s_addr))
                                goto nla_put_failure;
 #if IS_ENABLED(CONFIG_IPV6)
                } else {
                        if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
-                                            &vxlan->saddr.sin6.sin6_addr))
+                                            &vxlan->cfg.saddr.sin6.sin6_addr))
                                goto nla_put_failure;
 #endif
                }
        }
 
-       if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
-           nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
+       if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
+           nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
            nla_put_u8(skb, IFLA_VXLAN_LEARNING,
                        !!(vxlan->flags & VXLAN_F_LEARN)) ||
            nla_put_u8(skb, IFLA_VXLAN_PROXY,
@@ -2843,9 +2970,11 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
                        !!(vxlan->flags & VXLAN_F_L2MISS)) ||
            nla_put_u8(skb, IFLA_VXLAN_L3MISS,
                        !!(vxlan->flags & VXLAN_F_L3MISS)) ||
-           nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
-           nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
-           nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port) ||
+           nla_put_u8(skb, IFLA_VXLAN_FLOWBASED,
+                      !!(vxlan->flags & VXLAN_F_FLOW_BASED)) ||
+           nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
+           nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
+           nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
            nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
                        !!(vxlan->flags & VXLAN_F_UDP_CSUM)) ||
            nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
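
The exported vxlan_dev_create() gives other kernel code a netlink-free way to
instantiate a vxlan device from a struct vxlan_config. A minimal sketch of a
hypothetical in-kernel caller follows; the function name, VNI and port are
illustrative only, error handling is abbreviated, and the declarations are
assumed visible via <net/vxlan.h>:

	/* Hypothetical caller of the new helper. */
	static struct net_device *example_vxlan_create(struct net *net)
	{
		struct vxlan_config conf;

		memset(&conf, 0, sizeof(conf));
		conf.vni = 42;				/* cf. IFLA_VXLAN_ID */
		conf.dst_port = htons(4789);		/* 0 falls back to default_port */
		conf.flags = VXLAN_F_COLLECT_METADATA;	/* flow-based rx metadata */

		return example_check(vxlan_dev_create(net, "vxlan%d",
						      NET_NAME_ENUM, &conf));
	}

vxlan_dev_configure() applies the same defaulting as the netlink path
(default_port, FDB_AGE_DEFAULT) and rejects a duplicate VNI with -EEXIST, so a
caller only needs IS_ERR() on the result (example_check above stands in for
that check and is hypothetical).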
index 9729e69416358a120ef0c97cd3feb034cb0ca6bb..c04fb00e7930219f909939b9ea36140ad3101fb8 100644
@@ -11,7 +11,8 @@ ath10k_core-y += mac.o \
                 wmi-tlv.o \
                 bmi.o \
                 hw.o \
-                p2p.o
+                p2p.o \
+                swap.o
 
 ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
 ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
index 31a990635490aa07cf74a515e620c90fc1b697c8..df7c7616533b08636374736911ca3f624983a47e 100644
@@ -178,7 +178,7 @@ struct bmi_target_info {
 };
 
 /* in msec */
-#define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ)
+#define BMI_COMMUNICATION_TIMEOUT_HZ (2 * HZ)
 
 #define BMI_CE_NUM_TO_TARG 0
 #define BMI_CE_NUM_TO_HOST 1
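
Doubling BMI_COMMUNICATION_TIMEOUT_HZ gives slower targets more headroom
during early boot and firmware download. Note the value is in jiffies despite
the "in msec" remark above it. A sketch of how such an HZ-based timeout is
typically consumed, assuming a completion-based wait; example_bmi_wait is
hypothetical and the driver's real BMI exchange path may poll instead:

	#include <linux/completion.h>

	static int example_bmi_wait(struct completion *done)
	{
		unsigned long time_left;

		/* 2 * HZ == two seconds regardless of CONFIG_HZ */
		time_left = wait_for_completion_timeout(done,
						BMI_COMMUNICATION_TIMEOUT_HZ);
		return time_left ? 0 : -ETIMEDOUT;
	}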
index e508c65b6ba8a6d62ffbcca77e5d8f41ff036ad3..cf28fbebaedcfc9b372d509c6a88fcbc2d808773 100644
@@ -452,6 +452,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
 {
        struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
        unsigned int nentries_mask = dest_ring->nentries_mask;
+       struct ath10k *ar = ce_state->ar;
        unsigned int sw_index = dest_ring->sw_index;
 
        struct ce_desc *base = dest_ring->base_addr_owner_space;
index 0eddb204d85bb9b08dcb84f55530b25fb3b901e8..5c903e15dd65e6ad7da14caffb1e3a4fd8fe4bfc 100644
@@ -21,7 +21,7 @@
 #include "hif.h"
 
 /* Maximum number of Copy Engine's supported */
-#define CE_COUNT_MAX 8
+#define CE_COUNT_MAX 12
 #define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
 
 /* Descriptor rings must be aligned to this boundary */
@@ -38,8 +38,13 @@ struct ath10k_ce_pipe;
 
 #define CE_DESC_FLAGS_GATHER         (1 << 0)
 #define CE_DESC_FLAGS_BYTE_SWAP      (1 << 1)
-#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
-#define CE_DESC_FLAGS_META_DATA_LSB  2
+
+/* Following desc flags are used in QCA99X0 */
+#define CE_DESC_FLAGS_HOST_INT_DIS     (1 << 2)
+#define CE_DESC_FLAGS_TGT_INT_DIS      (1 << 3)
+
+#define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask
+#define CE_DESC_FLAGS_META_DATA_LSB  ar->hw_values->ce_desc_meta_data_lsb
 
 struct ce_desc {
        __le32 addr;
@@ -423,8 +428,10 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
 
 #define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
 
-#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB              8
-#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK             0x0000ff00
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \
+                               ar->regs->ce_wrap_intr_sum_host_msi_lsb
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK \
+                               ar->regs->ce_wrap_intr_sum_host_msi_mask
 #define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
        (((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
                CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
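
CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_* (like CE_DESC_FLAGS_META_DATA_* above)
now resolve through a local struct ath10k pointer, which is why the ce.c hunk
earlier adds "struct ath10k *ar = ce_state->ar;" to a function that never
touched ar before. Any scope using these macros must provide such a pointer;
a minimal illustration (example_ce_host_msi is hypothetical):

	static inline u32 example_ce_host_msi(struct ath10k *ar, u32 summary)
	{
		/* expands via ar->regs->ce_wrap_intr_sum_host_msi_* */
		return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(summary);
	}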
index 59496a90ad5e241563f5bcaa8226ebfde87bd478..25510679fd2ed643d81f782eb3edc37e9c9f9a86 100644
 #include "wmi-ops.h"
 
 unsigned int ath10k_debug_mask;
+static unsigned int ath10k_cryptmode_param;
 static bool uart_print;
 static bool skip_otp;
 
 module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
+module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
 module_param(uart_print, bool, 0644);
 module_param(skip_otp, bool, 0644);
 
 MODULE_PARM_DESC(debug_mask, "Debugging mask");
 MODULE_PARM_DESC(uart_print, "Uart target debugging");
 MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
+MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
 
 static const struct ath10k_hw_params ath10k_hw_params_list[] = {
        {
@@ -49,6 +52,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
                .uart_pin = 7,
                .has_shifted_cc_wraparound = true,
+               .otp_exe_param = 0,
                .fw = {
                        .dir = QCA988X_HW_2_0_FW_DIR,
                        .fw = QCA988X_HW_2_0_FW_FILE,
@@ -63,6 +67,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .name = "qca6174 hw2.1",
                .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
                .uart_pin = 6,
+               .otp_exe_param = 0,
                .fw = {
                        .dir = QCA6174_HW_2_1_FW_DIR,
                        .fw = QCA6174_HW_2_1_FW_FILE,
@@ -77,6 +82,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .name = "qca6174 hw3.0",
                .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
                .uart_pin = 6,
+               .otp_exe_param = 0,
                .fw = {
                        .dir = QCA6174_HW_3_0_FW_DIR,
                        .fw = QCA6174_HW_3_0_FW_FILE,
@@ -91,6 +97,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .name = "qca6174 hw3.2",
                .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
                .uart_pin = 6,
+               .otp_exe_param = 0,
                .fw = {
                        /* uses same binaries as hw3.0 */
                        .dir = QCA6174_HW_3_0_FW_DIR,
@@ -101,8 +108,68 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                        .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
                },
        },
+       {
+               .id = QCA99X0_HW_2_0_DEV_VERSION,
+               .name = "qca99x0 hw2.0",
+               .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR,
+               .uart_pin = 7,
+               .otp_exe_param = 0x00000700,
+               .continuous_frag_desc = true,
+               .fw = {
+                       .dir = QCA99X0_HW_2_0_FW_DIR,
+                       .fw = QCA99X0_HW_2_0_FW_FILE,
+                       .otp = QCA99X0_HW_2_0_OTP_FILE,
+                       .board = QCA99X0_HW_2_0_BOARD_DATA_FILE,
+                       .board_size = QCA99X0_BOARD_DATA_SZ,
+                       .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
+               },
+       },
+};
+
+static const char *const ath10k_core_fw_feature_str[] = {
+       [ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX] = "wmi-mgmt-rx",
+       [ATH10K_FW_FEATURE_WMI_10X] = "wmi-10.x",
+       [ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX] = "has-wmi-mgmt-tx",
+       [ATH10K_FW_FEATURE_NO_P2P] = "no-p2p",
+       [ATH10K_FW_FEATURE_WMI_10_2] = "wmi-10.2",
+       [ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT] = "multi-vif-ps",
+       [ATH10K_FW_FEATURE_WOWLAN_SUPPORT] = "wowlan",
+       [ATH10K_FW_FEATURE_IGNORE_OTP_RESULT] = "ignore-otp",
+       [ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING] = "no-4addr-pad",
+       [ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT] = "skip-clock-init",
 };
 
+static unsigned int ath10k_core_get_fw_feature_str(char *buf,
+                                                  size_t buf_len,
+                                                  enum ath10k_fw_features feat)
+{
+       if (feat >= ARRAY_SIZE(ath10k_core_fw_feature_str) ||
+           WARN_ON(!ath10k_core_fw_feature_str[feat])) {
+               return scnprintf(buf, buf_len, "bit%d", feat);
+       }
+
+       return scnprintf(buf, buf_len, "%s", ath10k_core_fw_feature_str[feat]);
+}
+
+void ath10k_core_get_fw_features_str(struct ath10k *ar,
+                                    char *buf,
+                                    size_t buf_len)
+{
+       unsigned int len = 0;
+       int i;
+
+       for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
+               if (test_bit(i, ar->fw_features)) {
+                       if (len > 0)
+                               len += scnprintf(buf + len, buf_len - len, ",");
+
+                       len += ath10k_core_get_fw_feature_str(buf + len,
+                                                             buf_len - len,
+                                                             i);
+               }
+       }
+}
+
 static void ath10k_send_suspend_complete(struct ath10k *ar)
 {
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n");
@@ -355,6 +422,7 @@ out:
 static int ath10k_download_and_run_otp(struct ath10k *ar)
 {
        u32 result, address = ar->hw_params.patch_load_addr;
+       u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param;
        int ret;
 
        ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len);
@@ -380,7 +448,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
                return ret;
        }
 
-       ret = ath10k_bmi_execute(ar, address, 0, &result);
+       ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result);
        if (ret) {
                ath10k_err(ar, "could not execute otp (%d)\n", ret);
                return ret;
@@ -412,6 +480,13 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
                data = ar->firmware_data;
                data_len = ar->firmware_len;
                mode_name = "normal";
+               ret = ath10k_swap_code_seg_configure(ar,
+                               ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW);
+               if (ret) {
+                       ath10k_err(ar, "failed to configure fw code swap: %d\n",
+                                  ret);
+                       return ret;
+               }
                break;
        case ATH10K_FIRMWARE_MODE_UTF:
                data = ar->testmode.utf->data;
@@ -451,6 +526,8 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
        if (!IS_ERR(ar->cal_file))
                release_firmware(ar->cal_file);
 
+       ath10k_swap_code_seg_release(ar);
+
        ar->board = NULL;
        ar->board_data = NULL;
        ar->board_len = 0;
@@ -464,6 +541,7 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
        ar->firmware_len = 0;
 
        ar->cal_file = NULL;
+
 }
 
 static int ath10k_fetch_cal_file(struct ath10k *ar)
@@ -737,6 +815,13 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
                        ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
                                   ar->htt.op_version);
                        break;
+               case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE:
+                       ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                                  "found fw code swap image ie (%zd B)\n",
+                                  ie_len);
+                       ar->swap.firmware_codeswap_data = data;
+                       ar->swap.firmware_codeswap_len = ie_len;
+                       break;
                default:
                        ath10k_warn(ar, "Unknown FW IE: %u\n",
                                    le32_to_cpu(hdr->id));
@@ -991,6 +1076,46 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
                return -EINVAL;
        }
 
+       ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_NATIVE_WIFI;
+       switch (ath10k_cryptmode_param) {
+       case ATH10K_CRYPT_MODE_HW:
+               clear_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
+               clear_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
+               break;
+       case ATH10K_CRYPT_MODE_SW:
+               if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
+                             ar->fw_features)) {
+                       ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware");
+                       return -EINVAL;
+               }
+
+               set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
+               set_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
+               break;
+       default:
+               ath10k_info(ar, "invalid cryptmode: %d\n",
+                           ath10k_cryptmode_param);
+               return -EINVAL;
+       }
+
+       ar->htt.max_num_amsdu = ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT;
+       ar->htt.max_num_ampdu = ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT;
+
+       if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+               ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_RAW;
+
+               /* Workaround:
+                *
+                * Firmware A-MSDU aggregation breaks with RAW Tx encap mode
+                * and causes enormous performance issues (malformed frames,
+                * etc).
+                *
+                * Disabling A-MSDU makes RAW mode stable with heavy traffic
+                * albeit a bit slower compared to regular operation.
+                */
+               ar->htt.max_num_amsdu = 1;
+       }
+
        /* Backwards compatibility for firmwares without
         * ATH10K_FW_IE_WMI_OP_VERSION.
         */
@@ -1014,6 +1139,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
                ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
                ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
                        WMI_STAT_PEER;
+               ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
                break;
        case ATH10K_FW_WMI_OP_VERSION_10_1:
        case ATH10K_FW_WMI_OP_VERSION_10_2:
@@ -1023,6 +1149,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
                ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
                ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
                ar->fw_stats_req_mask = WMI_STAT_PEER;
+               ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
                break;
        case ATH10K_FW_WMI_OP_VERSION_TLV:
                ar->max_num_peers = TARGET_TLV_NUM_PEERS;
@@ -1033,6 +1160,17 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
                ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
                ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
                        WMI_STAT_PEER;
+               ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
+               break;
+       case ATH10K_FW_WMI_OP_VERSION_10_4:
+               ar->max_num_peers = TARGET_10_4_NUM_PEERS;
+               ar->max_num_stations = TARGET_10_4_NUM_STATIONS;
+               ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS;
+               ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS;
+               ar->num_tids = TARGET_10_4_TGT_NUM_TIDS;
+               ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC;
+               ar->fw_stats_req_mask = WMI_STAT_PEER;
+               ar->max_spatial_stream = WMI_10_4_MAX_SPATIAL_STREAM;
                break;
        case ATH10K_FW_WMI_OP_VERSION_UNSET:
        case ATH10K_FW_WMI_OP_VERSION_MAX:
@@ -1056,6 +1194,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
                case ATH10K_FW_WMI_OP_VERSION_TLV:
                        ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
                        break;
+               case ATH10K_FW_WMI_OP_VERSION_10_4:
                case ATH10K_FW_WMI_OP_VERSION_UNSET:
                case ATH10K_FW_WMI_OP_VERSION_MAX:
                        WARN_ON(1);
@@ -1330,6 +1469,13 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
                goto err_free_firmware_files;
        }
 
+       ret = ath10k_swap_code_seg_init(ar);
+       if (ret) {
+               ath10k_err(ar, "failed to initialize code swap segment: %d\n",
+                          ret);
+               goto err_free_firmware_files;
+       }
+
        mutex_lock(&ar->conf_mutex);
 
        ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
@@ -1470,9 +1616,15 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
        switch (hw_rev) {
        case ATH10K_HW_QCA988X:
                ar->regs = &qca988x_regs;
+               ar->hw_values = &qca988x_values;
                break;
        case ATH10K_HW_QCA6174:
                ar->regs = &qca6174_regs;
+               ar->hw_values = &qca6174_values;
+               break;
+       case ATH10K_HW_QCA99X0:
+               ar->regs = &qca99x0_regs;
+               ar->hw_values = &qca99x0_values;
                break;
        default:
                ath10k_err(ar, "unsupported core hardware revision %d\n",
@@ -1497,6 +1649,10 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
        if (!ar->workqueue)
                goto err_free_mac;
 
+       ar->workqueue_aux = create_singlethread_workqueue("ath10k_aux_wq");
+       if (!ar->workqueue_aux)
+               goto err_free_wq;
+
        mutex_init(&ar->conf_mutex);
        spin_lock_init(&ar->data_lock);
 
@@ -1517,10 +1673,12 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
 
        ret = ath10k_debug_create(ar);
        if (ret)
-               goto err_free_wq;
+               goto err_free_aux_wq;
 
        return ar;
 
+err_free_aux_wq:
+       destroy_workqueue(ar->workqueue_aux);
 err_free_wq:
        destroy_workqueue(ar->workqueue);
 
@@ -1536,6 +1694,9 @@ void ath10k_core_destroy(struct ath10k *ar)
        flush_workqueue(ar->workqueue);
        destroy_workqueue(ar->workqueue);
 
+       flush_workqueue(ar->workqueue_aux);
+       destroy_workqueue(ar->workqueue_aux);
+
        ath10k_debug_destroy(ar);
        ath10k_mac_destroy(ar);
 }
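
ath10k_core_get_fw_features_str() renders the fw_features bitmap as a
comma-separated list, falling back to "bitN" for bits with no entry in
ath10k_core_fw_feature_str[]. A hypothetical use, assuming an initialized
"ar" and the two bits shown:

	char buf[128] = {};

	set_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features);
	set_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, ar->fw_features);
	ath10k_core_get_fw_features_str(ar, buf, sizeof(buf));
	/* buf now holds "wmi-10.2,wowlan" */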
index 78094f23c9dd5264a66d32167680a8af8728a8ff..6a387bac27b0f8bea60d1ee7a7c4eba223fa4545 100644
@@ -36,6 +36,7 @@
 #include "spectral.h"
 #include "thermal.h"
 #include "wow.h"
+#include "swap.h"
 
 #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
 #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -91,6 +92,7 @@ struct ath10k_skb_cb {
                u8 tid;
                u16 freq;
                bool is_offchan;
+               bool nohwcrypt;
                struct ath10k_htt_txbuf *txbuf;
                u32 txbuf_paddr;
        } __packed htt;
@@ -151,6 +153,7 @@ struct ath10k_wmi {
        const struct wmi_ops *ops;
 
        u32 num_mem_chunks;
+       u32 rx_decap_mode;
        struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
 };
 
@@ -327,8 +330,8 @@ struct ath10k_vif {
                        u32 uapsd;
                } sta;
                struct {
-                       /* 127 stations; wmi limit */
-                       u8 tim_bitmap[16];
+                       /* 512 stations */
+                       u8 tim_bitmap[64];
                        u8 tim_len;
                        u32 ssid_len;
                        u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -340,6 +343,7 @@ struct ath10k_vif {
        } u;
 
        bool use_cts_prot;
+       bool nohwcrypt;
        int num_legacy_stations;
        int txpower;
        struct wmi_wmm_params_all_arg wmm_params;
@@ -381,9 +385,6 @@ struct ath10k_debug {
        u32 reg_addr;
        u32 nf_cal_period;
 
-       u8 htt_max_amsdu;
-       u8 htt_max_ampdu;
-
        struct ath10k_fw_crash_data *fw_crash_data;
 };
 
@@ -452,16 +453,21 @@ enum ath10k_fw_features {
        ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6,
 
        /* Don't trust error code from otp.bin */
-       ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
+       ATH10K_FW_FEATURE_IGNORE_OTP_RESULT = 7,
 
        /* Some firmware revisions pad 4th hw address to 4 byte boundary making
         * it 8 bytes long in Native Wifi Rx decap.
         */
-       ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
+       ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING = 8,
 
        /* Firmware supports bypassing PLL setting on init. */
        ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT = 9,
 
+       /* Raw mode support. If supported, FW supports receiving and transmitting
+        * frames in raw mode.
+        */
+       ATH10K_FW_FEATURE_RAW_MODE_SUPPORT = 10,
+
        /* keep last */
        ATH10K_FW_FEATURE_COUNT,
 };
@@ -475,6 +481,15 @@ enum ath10k_dev_flags {
         * waiters should immediately cancel instead of waiting for a time out.
         */
        ATH10K_FLAG_CRASH_FLUSH,
+
+       /* Use Raw mode instead of native WiFi Tx/Rx encap mode.
+        * Raw mode supports both hardware and software crypto. Native WiFi only
+        * supports hardware crypto.
+        */
+       ATH10K_FLAG_RAW_MODE,
+
+       /* Disable HW crypto engine */
+       ATH10K_FLAG_HW_CRYPTO_DISABLED,
 };
 
 enum ath10k_cal_mode {
@@ -483,6 +498,13 @@ enum ath10k_cal_mode {
        ATH10K_CAL_MODE_DT,
 };
 
+enum ath10k_crypt_mode {
+       /* Only use hardware crypto engine */
+       ATH10K_CRYPT_MODE_HW,
+       /* Only use software crypto engine */
+       ATH10K_CRYPT_MODE_SW,
+};
+
 static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
 {
        switch (mode) {
@@ -545,6 +567,7 @@ struct ath10k {
        u32 ht_cap_info;
        u32 vht_cap_info;
        u32 num_rf_chains;
+       u32 max_spatial_stream;
        /* protected by conf_mutex */
        bool ani_enabled;
 
@@ -560,6 +583,7 @@ struct ath10k {
        struct completion target_suspend;
 
        const struct ath10k_hw_regs *regs;
+       const struct ath10k_hw_values *hw_values;
        struct ath10k_bmi bmi;
        struct ath10k_wmi wmi;
        struct ath10k_htc htc;
@@ -570,6 +594,7 @@ struct ath10k {
                const char *name;
                u32 patch_load_addr;
                int uart_pin;
+               u32 otp_exe_param;
 
                /* This is true if given HW chip has a quirky Cycle Counter
                 * wraparound which resets to 0x7fffffff instead of 0. All
@@ -578,6 +603,12 @@ struct ath10k {
                 */
                bool has_shifted_cc_wraparound;
 
+               /* Some chips expect the fragment descriptor to be contiguous
+                * memory for any TX operation. Set the continuous_frag_desc
+                * flag for hardware with this requirement.
+                */
+               bool continuous_frag_desc;
+
                struct ath10k_hw_params_fw {
                        const char *dir;
                        const char *fw;
@@ -602,6 +633,12 @@ struct ath10k {
 
        const struct firmware *cal_file;
 
+       struct {
+               const void *firmware_codeswap_data;
+               size_t firmware_codeswap_len;
+               struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
+       } swap;
+
        char spec_board_id[100];
        bool spec_board_loaded;
 
@@ -617,6 +654,7 @@ struct ath10k {
                bool is_roc;
                int vdev_id;
                int roc_freq;
+               bool roc_notify;
        } scan;
 
        struct {
@@ -656,6 +694,8 @@ struct ath10k {
        struct completion vdev_setup_done;
 
        struct workqueue_struct *workqueue;
+       /* Auxiliary workqueue */
+       struct workqueue_struct *workqueue_aux;
 
        /* prevents concurrent FW reconfiguration */
        struct mutex conf_mutex;
@@ -675,6 +715,11 @@ struct ath10k {
        int max_num_stations;
        int max_num_vdevs;
        int max_num_tdls_vdevs;
+       int num_active_peers;
+       int num_tids;
+
+       struct work_struct svc_rdy_work;
+       struct sk_buff *svc_rdy_skb;
 
        struct work_struct offchan_tx_work;
        struct sk_buff_head offchan_tx_queue;
@@ -749,6 +794,9 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
                                  enum ath10k_hw_rev hw_rev,
                                  const struct ath10k_hif_ops *hif_ops);
 void ath10k_core_destroy(struct ath10k *ar);
+void ath10k_core_get_fw_features_str(struct ath10k *ar,
+                                    char *buf,
+                                    size_t max_len);
 
 int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode);
 int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
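
A sizing note on the enlarged tim_bitmap: the TIM carries one bit per
association ID, so the new 64-byte array covers 512 bits, up from 128 bits in
the old 16-byte array. AID 0 doubles as the multicast/DTIM bit, which is why
the removed comment spoke of 127 stations.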
index 8fa606a9c4ddaf3f95b43d80723a0fbcfe59a010..f7aa1c73b4814968420dc50209b302a8a676c383 100644
@@ -124,7 +124,11 @@ EXPORT_SYMBOL(ath10k_info);
 
 void ath10k_print_driver_info(struct ath10k *ar)
 {
-       ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
+       char fw_features[128] = {};
+
+       ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
+
+       ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d features %s\n",
                    ar->hw_params.name,
                    ar->target_version,
                    ar->chip_id,
@@ -137,8 +141,12 @@ void ath10k_print_driver_info(struct ath10k *ar)
                    ar->htt.target_version_major,
                    ar->htt.target_version_minor,
                    ar->wmi.op_version,
+                   ar->htt.op_version,
                    ath10k_cal_mode_str(ar->cal_mode),
-                   ar->max_num_stations);
+                   ar->max_num_stations,
+                   test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags),
+                   !test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags),
+                   fw_features);
        ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n",
                    config_enabled(CONFIG_ATH10K_DEBUG),
                    config_enabled(CONFIG_ATH10K_DEBUGFS),
@@ -1357,12 +1365,8 @@ static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file,
 
        mutex_lock(&ar->conf_mutex);
 
-       if (ar->debug.htt_max_amsdu)
-               amsdu = ar->debug.htt_max_amsdu;
-
-       if (ar->debug.htt_max_ampdu)
-               ampdu = ar->debug.htt_max_ampdu;
-
+       amsdu = ar->htt.max_num_amsdu;
+       ampdu = ar->htt.max_num_ampdu;
        mutex_unlock(&ar->conf_mutex);
 
        len = scnprintf(buf, sizeof(buf), "%u %u\n", amsdu, ampdu);
@@ -1396,8 +1400,8 @@ static ssize_t ath10k_write_htt_max_amsdu_ampdu(struct file *file,
                goto out;
 
        res = count;
-       ar->debug.htt_max_amsdu = amsdu;
-       ar->debug.htt_max_ampdu = ampdu;
+       ar->htt.max_num_amsdu = amsdu;
+       ar->htt.max_num_ampdu = ampdu;
 
 out:
        mutex_unlock(&ar->conf_mutex);
@@ -1899,9 +1903,6 @@ void ath10k_debug_stop(struct ath10k *ar)
        if (ar->debug.htt_stats_mask != 0)
                cancel_delayed_work(&ar->debug.htt_stats_dwork);
 
-       ar->debug.htt_max_amsdu = 0;
-       ar->debug.htt_max_ampdu = 0;
-
        ath10k_wmi_pdev_pktlog_disable(ar);
 }
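
With the limits moved from ath10k_debug into struct ath10k_htt, a value
written through the htt_max_amsdu_ampdu debugfs file appears to persist
across ath10k_debug_stop() instead of being cleared back to zero, and the
same fields seed the aggregation configuration sent during ath10k_htt_setup().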
 
index 6da6ef26143af0caeac2ed8ed76b14f28319b471..3e6ba63dfdffe118b8d6e62a4fb086c376f1e3a5 100644
@@ -102,6 +102,43 @@ static const enum htt_t2h_msg_type htt_tlv_t2h_msg_types[] = {
        [HTT_TLV_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
 };
 
+static const enum htt_t2h_msg_type htt_10_4_t2h_msg_types[] = {
+       [HTT_10_4_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+       [HTT_10_4_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+       [HTT_10_4_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+       [HTT_10_4_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+       [HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+       [HTT_10_4_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+       [HTT_10_4_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+       [HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+       [HTT_10_4_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+       [HTT_10_4_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+       [HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+       [HTT_10_4_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+       [HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+       [HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND] =
+                               HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+       [HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+                               HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+       [HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+       [HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+                               HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+       [HTT_10_4_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+       [HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+                               HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+       [HTT_10_4_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+       [HTT_10_4_T2H_MSG_TYPE_EN_STATS] = HTT_T2H_MSG_TYPE_EN_STATS,
+       [HTT_10_4_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
+       [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND] =
+                               HTT_T2H_MSG_TYPE_TX_FETCH_IND,
+       [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF] =
+                               HTT_T2H_MSG_TYPE_TX_FETCH_CONF,
+       [HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD] =
+                               HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
+       [HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND] =
+                               HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND,
+};
+
 int ath10k_htt_connect(struct ath10k_htt *htt)
 {
        struct ath10k_htc_svc_conn_req conn_req;
@@ -147,6 +184,10 @@ int ath10k_htt_init(struct ath10k *ar)
                2; /* ip4 dscp or ip6 priority */
 
        switch (ar->htt.op_version) {
+       case ATH10K_FW_HTT_OP_VERSION_10_4:
+               ar->htt.t2h_msg_types = htt_10_4_t2h_msg_types;
+               ar->htt.t2h_msg_types_max = HTT_10_4_T2H_NUM_MSGS;
+               break;
        case ATH10K_FW_HTT_OP_VERSION_10_1:
                ar->htt.t2h_msg_types = htt_10x_t2h_msg_types;
                ar->htt.t2h_msg_types_max = HTT_10X_T2H_NUM_MSGS;
@@ -205,8 +246,31 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
        }
 
        status = ath10k_htt_verify_version(htt);
+       if (status) {
+               ath10k_warn(ar, "failed to verify htt version: %d\n",
+                           status);
+               return status;
+       }
+
+       status = ath10k_htt_send_frag_desc_bank_cfg(htt);
        if (status)
                return status;
 
-       return ath10k_htt_send_rx_ring_cfg_ll(htt);
+       status = ath10k_htt_send_rx_ring_cfg_ll(htt);
+       if (status) {
+               ath10k_warn(ar, "failed to setup rx ring: %d\n",
+                           status);
+               return status;
+       }
+
+       status = ath10k_htt_h2t_aggr_cfg_msg(htt,
+                                            htt->max_num_ampdu,
+                                            htt->max_num_amsdu);
+       if (status) {
+               ath10k_warn(ar, "failed to setup amsdu/ampdu limit: %d\n",
+                           status);
+               return status;
+       }
+
+       return 0;
 }
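
Each firmware branch numbers its HTT indications differently, so rx dispatch
first translates the on-wire ID through the table installed by
ath10k_htt_init(). A sketch of that translation step (example_translate_t2h
is hypothetical; the real lookup lives in the T2H message handler):

	static int example_translate_t2h(struct ath10k_htt *htt, u8 raw_type,
					 enum htt_t2h_msg_type *out)
	{
		if (raw_type >= htt->t2h_msg_types_max)
			return -EINVAL;	/* ID unknown to this fw branch */

		*out = htt->t2h_msg_types[raw_type];
		return 0;
	}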
index 7e8a0d83566379b01fec63235d968a9913639b52..57318751289555b8e956ef2441c0f2ffafc83835 100644
@@ -83,10 +83,39 @@ struct htt_ver_req {
  * around the mask + shift defs.
  */
 struct htt_data_tx_desc_frag {
-       __le32 paddr;
-       __le32 len;
+       union {
+               struct double_word_addr {
+                       __le32 paddr;
+                       __le32 len;
+               } __packed dword_addr;
+               struct triple_word_addr {
+                       __le32 paddr_lo;
+                       __le16 paddr_hi;
+                       __le16 len_16;
+               } __packed tword_addr;
+       } __packed;
 } __packed;
 
+struct htt_msdu_ext_desc {
+       __le32 tso_flag[3];
+       __le16 ip_identification;
+       u8 flags;
+       u8 reserved;
+       struct htt_data_tx_desc_frag frags[6];
+};
+
+#define        HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE         BIT(0)
+#define        HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE     BIT(1)
+#define        HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE     BIT(2)
+#define        HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE     BIT(3)
+#define        HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE     BIT(4)
+
+#define HTT_MSDU_CHECKSUM_ENABLE (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE \
+                                | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE \
+                                | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE \
+                                | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \
+                                | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE)
+
 enum htt_data_tx_desc_flags0 {
        HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
        HTT_DATA_TX_DESC_FLAGS0_NO_AGGR         = 1 << 1,
@@ -255,6 +284,9 @@ struct htt_aggr_conf {
 } __packed;
 
 #define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
+struct htt_mgmt_tx_desc_qca99x0 {
+       __le32 rate;
+} __packed;
 
 struct htt_mgmt_tx_desc {
        u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
@@ -263,6 +295,9 @@ struct htt_mgmt_tx_desc {
        __le32 len;
        __le32 vdev_id;
        u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
+       union {
+               struct htt_mgmt_tx_desc_qca99x0 qca99x0;
+       } __packed;
 } __packed;
 
 enum htt_mgmt_tx_status {
@@ -349,6 +384,38 @@ enum htt_tlv_t2h_msg_type {
        HTT_TLV_T2H_NUM_MSGS
 };
 
+enum htt_10_4_t2h_msg_type {
+       HTT_10_4_T2H_MSG_TYPE_VERSION_CONF           = 0x0,
+       HTT_10_4_T2H_MSG_TYPE_RX_IND                 = 0x1,
+       HTT_10_4_T2H_MSG_TYPE_RX_FLUSH               = 0x2,
+       HTT_10_4_T2H_MSG_TYPE_PEER_MAP               = 0x3,
+       HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP             = 0x4,
+       HTT_10_4_T2H_MSG_TYPE_RX_ADDBA               = 0x5,
+       HTT_10_4_T2H_MSG_TYPE_RX_DELBA               = 0x6,
+       HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND           = 0x7,
+       HTT_10_4_T2H_MSG_TYPE_PKTLOG                 = 0x8,
+       HTT_10_4_T2H_MSG_TYPE_STATS_CONF             = 0x9,
+       HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND            = 0xa,
+       HTT_10_4_T2H_MSG_TYPE_SEC_IND                = 0xb,
+       HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND          = 0xc,
+       HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND         = 0xd,
+       HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND      = 0xe,
+       HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE            = 0xf,
+       HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND   = 0x10,
+       HTT_10_4_T2H_MSG_TYPE_RX_PN_IND              = 0x11,
+       HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x12,
+       HTT_10_4_T2H_MSG_TYPE_TEST                   = 0x13,
+       HTT_10_4_T2H_MSG_TYPE_EN_STATS               = 0x14,
+       HTT_10_4_T2H_MSG_TYPE_AGGR_CONF              = 0x15,
+       HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND           = 0x16,
+       HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF          = 0x17,
+       HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD         = 0x18,
+       /* 0x19 to 0x2f are reserved */
+       HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND     = 0x30,
+       /* keep this last */
+       HTT_10_4_T2H_NUM_MSGS
+};
+
 enum htt_t2h_msg_type {
        HTT_T2H_MSG_TYPE_VERSION_CONF,
        HTT_T2H_MSG_TYPE_RX_IND,
@@ -375,6 +442,10 @@ enum htt_t2h_msg_type {
        HTT_T2H_MSG_TYPE_AGGR_CONF,
        HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
        HTT_T2H_MSG_TYPE_TEST,
+       HTT_T2H_MSG_TYPE_EN_STATS,
+       HTT_T2H_MSG_TYPE_TX_FETCH_IND,
+       HTT_T2H_MSG_TYPE_TX_FETCH_CONF,
+       HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND,
        /* keep this last */
        HTT_T2H_NUM_MSGS
 };
@@ -1325,6 +1396,8 @@ struct ath10k_htt {
        u8 target_version_minor;
        struct completion target_version_received;
        enum ath10k_fw_htt_op_version op_version;
+       u8 max_num_amsdu;
+       u8 max_num_ampdu;
 
        const enum htt_t2h_msg_type *t2h_msg_types;
        u32 t2h_msg_types_max;
@@ -1430,6 +1503,11 @@ struct ath10k_htt {
 
        /* rx_status template */
        struct ieee80211_rx_status rx_status;
+
+       struct {
+               dma_addr_t paddr;
+               struct htt_msdu_ext_desc *vaddr;
+       } frag_desc;
 };
 
 #define RX_HTT_HDR_STATUS_LEN 64
@@ -1482,6 +1560,12 @@ struct htt_rx_desc {
 #define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
 #define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
 
+/* These values are the defaults in most firmware revisions and are
+ * apparently a sweet spot performance-wise.
+ */
+#define ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT 3
+#define ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT 64
+
 int ath10k_htt_connect(struct ath10k_htt *htt);
 int ath10k_htt_init(struct ath10k *ar);
 int ath10k_htt_setup(struct ath10k_htt *htt);
@@ -1497,6 +1581,7 @@ void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
 int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
 int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
+int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt);
 int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
 int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
                                u8 max_subfrms_ampdu,
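
The new tword_addr layout splits a fragment's DMA address into a 32-bit low
word and a 16-bit high word for chips addressing beyond 4 GB. A hypothetical
fill helper, assuming addresses fit in 48 bits (example_fill_tword is
illustrative only):

	static void example_fill_tword(struct htt_data_tx_desc_frag *frag,
				       dma_addr_t paddr, u16 len)
	{
		frag->tword_addr.paddr_lo = __cpu_to_le32(lower_32_bits(paddr));
		frag->tword_addr.paddr_hi =
			__cpu_to_le16(upper_32_bits(paddr) & 0xffff);
		frag->tword_addr.len_16 = __cpu_to_le16(len);
	}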
index 89eb16b30fc42479a3b1c11a7b9b3fd88c043490..1b7a04366256febc8438e02a01eb979102cb1d39 100644
@@ -368,7 +368,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
                                        & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
                                           RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
-               msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
+               msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
                              RX_MSDU_START_INFO0_MSDU_LENGTH);
                msdu_chained = rx_desc->frag_info.ring2_more_count;
 
@@ -394,7 +394,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                        msdu_chaining = 1;
                }
 
-               last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
+               last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
                                RX_MSDU_END_INFO0_LAST_MSDU;
 
                trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
@@ -740,7 +740,7 @@ ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
            __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
                return NULL;
 
-       if (!(rxd->msdu_end.info0 &
+       if (!(rxd->msdu_end.common.info0 &
              __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
                return NULL;
 
@@ -991,9 +991,9 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
        bool is_last;
 
        rxd = (void *)msdu->data - sizeof(*rxd);
-       is_first = !!(rxd->msdu_end.info0 &
+       is_first = !!(rxd->msdu_end.common.info0 &
                      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
-       is_last = !!(rxd->msdu_end.info0 &
+       is_last = !!(rxd->msdu_end.common.info0 &
                     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
 
        /* Delivered decapped frame:
@@ -1017,9 +1017,8 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
        skb_trim(msdu, msdu->len - FCS_LEN);
 
        /* In most cases this will be true for sniffed frames. It makes sense
-        * to deliver them as-is without stripping the crypto param. This would
-        * also make sense for software based decryption (which is not
-        * implemented in ath10k).
+        * to deliver them as-is without stripping the crypto param. This is
+        * necessary for software based decryption.
         *
         * If there's no error then the frame is decrypted. At least that is
         * the case for frames that come in via fragmented rx indication.
@@ -1104,9 +1103,9 @@ static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
        rxd = (void *)msdu->data - sizeof(*rxd);
        hdr = (void *)rxd->rx_hdr_status;
 
-       is_first = !!(rxd->msdu_end.info0 &
+       is_first = !!(rxd->msdu_end.common.info0 &
                      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
-       is_last = !!(rxd->msdu_end.info0 &
+       is_last = !!(rxd->msdu_end.common.info0 &
                     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
        is_amsdu = !(is_first && is_last);
 
@@ -1201,7 +1200,6 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
 {
        struct htt_rx_desc *rxd;
        enum rx_msdu_decap_format decap;
-       struct ieee80211_hdr *hdr;
 
        /* First msdu's decapped header:
         * [802.11 header] <-- padded to 4 bytes long
@@ -1215,8 +1213,7 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
         */
 
        rxd = (void *)msdu->data - sizeof(*rxd);
-       hdr = (void *)rxd->rx_hdr_status;
-       decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
+       decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
                   RX_MSDU_START_INFO1_DECAP_FORMAT);
 
        switch (decap) {
@@ -1246,7 +1243,7 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
 
        rxd = (void *)skb->data - sizeof(*rxd);
        flags = __le32_to_cpu(rxd->attention.flags);
-       info = __le32_to_cpu(rxd->msdu_start.info1);
+       info = __le32_to_cpu(rxd->msdu_start.common.info1);
 
        is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
        is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
@@ -1439,7 +1436,7 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
 
        first = skb_peek(amsdu);
        rxd = (void *)first->data - sizeof(*rxd);
-       decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
+       decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
                   RX_MSDU_START_INFO1_DECAP_FORMAT);
 
        if (!chained)
@@ -1633,8 +1630,6 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
        __le16 msdu_id;
        int i;
 
-       lockdep_assert_held(&htt->tx_lock);
-
        switch (status) {
        case HTT_DATA_TX_STATUS_NO_ACK:
                tx_done.no_ack = true;
@@ -1759,14 +1754,14 @@ static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
                __skb_queue_tail(amsdu, msdu);
 
                rxd = (void *)msdu->data - sizeof(*rxd);
-               if (rxd->msdu_end.info0 &
+               if (rxd->msdu_end.common.info0 &
                    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
                        break;
        }
 
        msdu = skb_peek_tail(amsdu);
        rxd = (void *)msdu->data - sizeof(*rxd);
-       if (!(rxd->msdu_end.info0 &
+       if (!(rxd->msdu_end.common.info0 &
              __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
                skb_queue_splice_init(amsdu, list);
                return -EAGAIN;
@@ -2000,15 +1995,11 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                        break;
                }
 
-               spin_lock_bh(&htt->tx_lock);
                ath10k_txrx_tx_unref(htt, &tx_done);
-               spin_unlock_bh(&htt->tx_lock);
                break;
        }
        case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
-               spin_lock_bh(&htt->tx_lock);
-               __skb_queue_tail(&htt->tx_compl_q, skb);
-               spin_unlock_bh(&htt->tx_lock);
+               skb_queue_tail(&htt->tx_compl_q, skb);
                tasklet_schedule(&htt->txrx_compl_task);
                return;
        case HTT_T2H_MSG_TYPE_SEC_IND: {
@@ -2074,6 +2065,12 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                break;
        case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
                break;
+       case HTT_T2H_MSG_TYPE_AGGR_CONF:
+               break;
+       case HTT_T2H_MSG_TYPE_EN_STATS:
+       case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
+       case HTT_T2H_MSG_TYPE_TX_FETCH_CONF:
+       case HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND:
        default:
                ath10k_warn(ar, "htt event (%d) not handled\n",
                            resp->hdr.msg_type);
@@ -2093,12 +2090,10 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
        struct htt_resp *resp;
        struct sk_buff *skb;
 
-       spin_lock_bh(&htt->tx_lock);
-       while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
+       while ((skb = skb_dequeue(&htt->tx_compl_q))) {
                ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
                dev_kfree_skb_any(skb);
        }
-       spin_unlock_bh(&htt->tx_lock);
 
        spin_lock_bh(&htt->rx_ring.lock);
        while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
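
The tx completion queue now relies on the sk_buff_head's built-in spinlock
rather than htt->tx_lock: skb_queue_tail() and skb_dequeue() lock internally,
so the rx-side producer and the txrx_compl_task consumer no longer contend on
the tx lock. A minimal sketch of the idiom:

	#include <linux/skbuff.h>

	/* q->lock is taken inside each call; no external locking needed. */
	static void example_compl_push(struct sk_buff_head *q, struct sk_buff *skb)
	{
		skb_queue_tail(q, skb);
	}

	static struct sk_buff *example_compl_pop(struct sk_buff_head *q)
	{
		return skb_dequeue(q);
	}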
index a60ef7d1d5fcb98278b3838858432f4cb3c8930a..704bb5e071938b7e3f56b9b42fa8304c3dbe2fed 100644
@@ -63,7 +63,8 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
 
        lockdep_assert_held(&htt->tx_lock);
 
-       ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
+       ret = idr_alloc(&htt->pending_tx, skb, 0,
+                       htt->max_num_pending_tx, GFP_ATOMIC);
 
        ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
 
@@ -84,6 +85,7 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
 int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
 {
        struct ath10k *ar = htt->ar;
+       int ret, size;
 
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
                   htt->max_num_pending_tx);
@@ -94,11 +96,31 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
        htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
                                       sizeof(struct ath10k_htt_txbuf), 4, 0);
        if (!htt->tx_pool) {
-               idr_destroy(&htt->pending_tx);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto free_idr_pending_tx;
+       }
+
+       if (!ar->hw_params.continuous_frag_desc)
+               goto skip_frag_desc_alloc;
+
+       size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
+       htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
+                                                 &htt->frag_desc.paddr,
+                                                 GFP_DMA);
+       if (!htt->frag_desc.vaddr) {
+               ath10k_warn(ar, "failed to alloc fragment desc memory\n");
+               ret = -ENOMEM;
+               goto free_tx_pool;
        }
 
+skip_frag_desc_alloc:
        return 0;
+
+free_tx_pool:
+       dma_pool_destroy(htt->tx_pool);
+free_idr_pending_tx:
+       idr_destroy(&htt->pending_tx);
+       return ret;
 }
 
 static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
@@ -112,18 +134,25 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
        tx_done.discard = 1;
        tx_done.msdu_id = msdu_id;
 
-       spin_lock_bh(&htt->tx_lock);
        ath10k_txrx_tx_unref(htt, &tx_done);
-       spin_unlock_bh(&htt->tx_lock);
 
        return 0;
 }
 
 void ath10k_htt_tx_free(struct ath10k_htt *htt)
 {
+       int size;
+
        idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
        idr_destroy(&htt->pending_tx);
        dma_pool_destroy(htt->tx_pool);
+
+       if (htt->frag_desc.vaddr) {
+               size = htt->max_num_pending_tx *
+                                 sizeof(struct htt_msdu_ext_desc);
+               dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr,
+                                 htt->frag_desc.paddr);
+       }
 }
 
 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
@@ -201,6 +230,49 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
        return 0;
 }
 
+int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
+{
+       struct ath10k *ar = htt->ar;
+       struct sk_buff *skb;
+       struct htt_cmd *cmd;
+       int ret, size;
+
+       if (!ar->hw_params.continuous_frag_desc)
+               return 0;
+
+       if (!htt->frag_desc.paddr) {
+               ath10k_warn(ar, "invalid frag desc memory\n");
+               return -EINVAL;
+       }
+
+       size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
+       skb = ath10k_htc_alloc_skb(ar, size);
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put(skb, size);
+       cmd = (struct htt_cmd *)skb->data;
+       cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
+       cmd->frag_desc_bank_cfg.info = 0;
+       cmd->frag_desc_bank_cfg.num_banks = 1;
+       cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
+       cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
+                               __cpu_to_le32(htt->frag_desc.paddr);
+       cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
+       cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
+                               __cpu_to_le16(htt->max_num_pending_tx - 1);
+
+       ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+       if (ret) {
+               ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
+                           ret);
+               dev_kfree_skb_any(skb);
+               return ret;
+       }
+
+       return 0;
+}
+
 int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
 {
        struct ath10k *ar = htt->ar;
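
The command construction above follows a fixed pattern: compute the message size from the command header plus the payload struct, reserve it in an HTC skb, fill the fields with cpu-to-le conversions (the target always consumes little-endian), and free the skb if the send fails. A userspace sketch of just the endian-safe serialization step; the struct below is a simplified stand-in, not the real htt_frag_desc_bank_cfg layout:

#include <endian.h>             /* htole16/htole32 (glibc) */
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in payload: all multi-byte fields are stored
 * little-endian regardless of host byte order. */
struct bank_cfg {
        uint8_t  info;
        uint8_t  num_banks;
        uint16_t desc_size;             /* __le16 on the wire */
        uint32_t bank_base_addr;        /* __le32 on the wire */
        uint16_t bank_max_id;           /* __le16 on the wire */
} __attribute__((packed));

int main(void)
{
        uint32_t paddr = 0x40000000;    /* hypothetical frag_desc.paddr */
        uint16_t max_pending_tx = 1424; /* hypothetical queue depth */
        struct bank_cfg cfg = {
                .num_banks      = 1,
                .desc_size      = htole16(64),  /* placeholder size */
                .bank_base_addr = htole32(paddr),
                .bank_max_id    = htole16(max_pending_tx - 1),
        };

        printf("bank 0: base 0x%08x, ids 0..%u\n",
               (unsigned)le32toh(cfg.bank_base_addr),
               (unsigned)le16toh(cfg.bank_max_id));
        return 0;
}
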
@@ -355,12 +427,11 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 
        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+       spin_unlock_bh(&htt->tx_lock);
        if (res < 0) {
-               spin_unlock_bh(&htt->tx_lock);
                goto err_tx_dec;
        }
        msdu_id = res;
-       spin_unlock_bh(&htt->tx_lock);
 
        txdesc = ath10k_htc_alloc_skb(ar, len);
        if (!txdesc) {
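
Both tx paths above shrink the critical section to just the idr allocation: the unlock now happens before the error check, since neither the check nor the later descriptor work touches state guarded by tx_lock. A pthread sketch of the same narrowing (the counter is a stand-in for idr_alloc):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static int next_id;
static const int max_pending = 1424;    /* hypothetical limit */

/* Only the allocation itself is serialized; callers check the result
 * and do the remaining per-frame work outside the lock. */
static int alloc_msdu_id(void)
{
        int id;

        pthread_mutex_lock(&tx_lock);
        id = (next_id < max_pending) ? next_id++ : -1;
        pthread_mutex_unlock(&tx_lock);

        return id;
}

int main(void)
{
        int id = alloc_msdu_id();

        if (id < 0) {
                fprintf(stderr, "no msdu ids left\n");
                return 1;
        }
        printf("msdu id %d\n", id);
        return 0;
}
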
@@ -376,6 +447,8 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 
        skb_put(txdesc, len);
        cmd = (struct htt_cmd *)txdesc->data;
+       memset(cmd, 0, len);
+
        cmd->hdr.msg_type         = HTT_H2T_MSG_TYPE_MGMT_TX;
        cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
        cmd->mgmt_tx.len        = __cpu_to_le32(msdu->len);
@@ -422,6 +495,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        u16 msdu_id, flags1 = 0;
        dma_addr_t paddr = 0;
        u32 frags_paddr = 0;
+       struct htt_msdu_ext_desc *ext_desc = NULL;
 
        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
@@ -429,12 +503,11 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 
        spin_lock_bh(&htt->tx_lock);
        res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+       spin_unlock_bh(&htt->tx_lock);
        if (res < 0) {
-               spin_unlock_bh(&htt->tx_lock);
                goto err_tx_dec;
        }
        msdu_id = res;
-       spin_unlock_bh(&htt->tx_lock);
 
        prefetch_len = min(htt->prefetch_len, msdu->len);
        prefetch_len = roundup(prefetch_len, 4);
@@ -450,8 +523,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        if ((ieee80211_is_action(hdr->frame_control) ||
             ieee80211_is_deauth(hdr->frame_control) ||
             ieee80211_is_disassoc(hdr->frame_control)) &&
-            ieee80211_has_protected(hdr->frame_control))
+            ieee80211_has_protected(hdr->frame_control)) {
+               skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+       } else if (!skb_cb->htt.nohwcrypt &&
+                  skb_cb->txmode == ATH10K_HW_TXRX_RAW) {
                skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+       }
 
        skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
                                       DMA_TO_DEVICE);
@@ -465,16 +542,30 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
                /* pass through */
        case ATH10K_HW_TXRX_ETHERNET:
-               frags = skb_cb->htt.txbuf->frags;
-
-               frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
-               frags[0].len = __cpu_to_le32(msdu->len);
-               frags[1].paddr = 0;
-               frags[1].len = 0;
-
+               if (ar->hw_params.continuous_frag_desc) {
+                       memset(&htt->frag_desc.vaddr[msdu_id], 0,
+                              sizeof(struct htt_msdu_ext_desc));
+                       frags = (struct htt_data_tx_desc_frag *)
+                               &htt->frag_desc.vaddr[msdu_id].frags;
+                       ext_desc = &htt->frag_desc.vaddr[msdu_id];
+                       frags[0].tword_addr.paddr_lo =
+                               __cpu_to_le32(skb_cb->paddr);
+                       frags[0].tword_addr.paddr_hi = 0;
+                       frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+
+                       frags_paddr =  htt->frag_desc.paddr +
+                               (sizeof(struct htt_msdu_ext_desc) * msdu_id);
+               } else {
+                       frags = skb_cb->htt.txbuf->frags;
+                       frags[0].dword_addr.paddr =
+                               __cpu_to_le32(skb_cb->paddr);
+                       frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
+                       frags[1].dword_addr.paddr = 0;
+                       frags[1].dword_addr.len = 0;
+
+                       frags_paddr = skb_cb->htt.txbuf_paddr;
+               }
                flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
-
-               frags_paddr = skb_cb->htt.txbuf_paddr;
                break;
        case ATH10K_HW_TXRX_MGMT:
                flags0 |= SM(ATH10K_HW_TXRX_MGMT,
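
When continuous fragment descriptors are enabled, the per-frame descriptor's bus address is derived purely from the msdu_id: the descriptors live in one flat DMA region and the id doubles as an array index. A worked sketch of that arithmetic (the base address and 64-byte size are placeholders, not the real sizeof(struct htt_msdu_ext_desc)):

#include <stdint.h>
#include <stdio.h>

#define EXT_DESC_SIZE 64U               /* placeholder descriptor size */

int main(void)
{
        uint32_t frag_desc_paddr = 0x40000000;  /* hypothetical base */
        uint16_t msdu_id = 37;

        /* Same shape as: frags_paddr = htt->frag_desc.paddr +
         *      sizeof(struct htt_msdu_ext_desc) * msdu_id; */
        uint32_t frags_paddr = frag_desc_paddr + EXT_DESC_SIZE * msdu_id;

        printf("msdu %u -> ext desc at 0x%08x\n", msdu_id, frags_paddr);
        return 0;
}
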
@@ -508,14 +599,20 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
                        prefetch_len);
        skb_cb->htt.txbuf->htc_hdr.flags = 0;
 
+       if (skb_cb->htt.nohwcrypt)
+               flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
        if (!skb_cb->is_protected)
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
 
        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
        flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
-       if (msdu->ip_summed == CHECKSUM_PARTIAL) {
+       if (msdu->ip_summed == CHECKSUM_PARTIAL &&
+           !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
                flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+               if (ar->hw_params.continuous_frag_desc)
+                       ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
        }
 
        /* Prevent firmware from sending up tx inspection requests. There's
index 5997f00afe3b43b677f1718a64a213090c969d92..fef7ccf6e185eacee92b8eee1a22071edff550a5 100644 (file)
@@ -34,8 +34,15 @@ const struct ath10k_hw_regs qca988x_regs = {
        .ce7_base_address               = 0x00059000,
        .soc_reset_control_si0_rst_mask = 0x00000001,
        .soc_reset_control_ce_rst_mask  = 0x00040000,
-       .soc_chip_id_address            = 0x00ec,
-       .scratch_3_address              = 0x0030,
+       .soc_chip_id_address            = 0x000000ec,
+       .scratch_3_address              = 0x00000030,
+       .fw_indicator_address           = 0x00009030,
+       .pcie_local_base_address        = 0x00080000,
+       .ce_wrap_intr_sum_host_msi_lsb  = 0x00000008,
+       .ce_wrap_intr_sum_host_msi_mask = 0x0000ff00,
+       .pcie_intr_fw_mask              = 0x00000400,
+       .pcie_intr_ce_mask_all          = 0x0007f800,
+       .pcie_intr_clr_address          = 0x00000014,
 };
 
 const struct ath10k_hw_regs qca6174_regs = {
@@ -54,8 +61,79 @@ const struct ath10k_hw_regs qca6174_regs = {
        .ce7_base_address                       = 0x00036000,
        .soc_reset_control_si0_rst_mask         = 0x00000000,
        .soc_reset_control_ce_rst_mask          = 0x00000001,
-       .soc_chip_id_address                    = 0x000f0,
-       .scratch_3_address                      = 0x0028,
+       .soc_chip_id_address                    = 0x000000f0,
+       .scratch_3_address                      = 0x00000028,
+       .fw_indicator_address                   = 0x0003a028,
+       .pcie_local_base_address                = 0x00080000,
+       .ce_wrap_intr_sum_host_msi_lsb          = 0x00000008,
+       .ce_wrap_intr_sum_host_msi_mask         = 0x0000ff00,
+       .pcie_intr_fw_mask                      = 0x00000400,
+       .pcie_intr_ce_mask_all                  = 0x0007f800,
+       .pcie_intr_clr_address                  = 0x00000014,
+};
+
+const struct ath10k_hw_regs qca99x0_regs = {
+       .rtc_state_cold_reset_mask              = 0x00000400,
+       .rtc_soc_base_address                   = 0x00080000,
+       .rtc_wmac_base_address                  = 0x00000000,
+       .soc_core_base_address                  = 0x00082000,
+       .ce_wrapper_base_address                = 0x0004d000,
+       .ce0_base_address                       = 0x0004a000,
+       .ce1_base_address                       = 0x0004a400,
+       .ce2_base_address                       = 0x0004a800,
+       .ce3_base_address                       = 0x0004ac00,
+       .ce4_base_address                       = 0x0004b000,
+       .ce5_base_address                       = 0x0004b400,
+       .ce6_base_address                       = 0x0004b800,
+       .ce7_base_address                       = 0x0004bc00,
+       /* Note: qca99x0 supports up to 12 Copy Engines. Other than the
+        * addresses of CE0 and CE1, no other copy engine is directly
+        * referred to in the code. It is not really necessary to assign
+        * addresses for the newly supported CEs in this address table.
+        *      Copy Engine             Address
+        *      CE8                     0x0004c000
+        *      CE9                     0x0004c400
+        *      CE10                    0x0004c800
+        *      CE11                    0x0004cc00
+        */
+       .soc_reset_control_si0_rst_mask         = 0x00000001,
+       .soc_reset_control_ce_rst_mask          = 0x00000100,
+       .soc_chip_id_address                    = 0x000000ec,
+       .scratch_3_address                      = 0x00040050,
+       .fw_indicator_address                   = 0x00040050,
+       .pcie_local_base_address                = 0x00000000,
+       .ce_wrap_intr_sum_host_msi_lsb          = 0x0000000c,
+       .ce_wrap_intr_sum_host_msi_mask         = 0x00fff000,
+       .pcie_intr_fw_mask                      = 0x00100000,
+       .pcie_intr_ce_mask_all                  = 0x000fff00,
+       .pcie_intr_clr_address                  = 0x00000010,
+};
+
+const struct ath10k_hw_values qca988x_values = {
+       .rtc_state_val_on               = 3,
+       .ce_count                       = 8,
+       .msi_assign_ce_max              = 7,
+       .num_target_ce_config_wlan      = 7,
+       .ce_desc_meta_data_mask         = 0xFFFC,
+       .ce_desc_meta_data_lsb          = 2,
+};
+
+const struct ath10k_hw_values qca6174_values = {
+       .rtc_state_val_on               = 3,
+       .ce_count                       = 8,
+       .msi_assign_ce_max              = 7,
+       .num_target_ce_config_wlan      = 7,
+       .ce_desc_meta_data_mask         = 0xFFFC,
+       .ce_desc_meta_data_lsb          = 2,
+};
+
+const struct ath10k_hw_values qca99x0_values = {
+       .rtc_state_val_on               = 5,
+       .ce_count                       = 12,
+       .msi_assign_ce_max              = 12,
+       .num_target_ce_config_wlan      = 10,
+       .ce_desc_meta_data_mask         = 0xFFF0,
+       .ce_desc_meta_data_lsb          = 4,
 };
 
 void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
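
The new qca99x0_regs and *_values tables extend the driver's per-chip abstraction: register offsets and limits that differ between QCA988X, QCA6174, and QCA99X0 live in const tables, selected once at probe time and reached through a pointer, so common code never branches on chip revision. A compressed sketch of the technique, reusing two offsets from the tables above:

#include <stdint.h>
#include <stdio.h>

struct hw_regs {
        uint32_t scratch_3_address;
        uint32_t fw_indicator_address;
};

/* Offsets taken from the qca988x/qca99x0 tables above. */
static const struct hw_regs qca988x = { 0x00000030, 0x00009030 };
static const struct hw_regs qca99x0 = { 0x00040050, 0x00040050 };

struct dev {
        const struct hw_regs *regs;     /* plays the role of ar->regs */
};

int main(void)
{
        struct dev ar = { .regs = &qca99x0 };   /* chosen by chip id at probe */

        printf("fw indicator at 0x%08x, scratch_3 at 0x%08x\n",
               ar.regs->fw_indicator_address, ar.regs->scratch_3_address);
        (void)qca988x;
        return 0;
}
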
index 85cca29375fee8f08ab09975c31cc3130194176e..d9de4a73847028f7d9c869dcc0ce617195488fbd 100644 (file)
@@ -72,6 +72,18 @@ enum qca6174_chip_id_rev {
 #define QCA6174_HW_3_0_BOARD_DATA_FILE "board.bin"
 #define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234
 
+/* QCA99X0 1.0 definitions (unsupported) */
+#define QCA99X0_HW_1_0_CHIP_ID_REV     0x0
+
+/* QCA99X0 2.0 definitions */
+#define QCA99X0_HW_2_0_DEV_VERSION     0x01000000
+#define QCA99X0_HW_2_0_CHIP_ID_REV     0x1
+#define QCA99X0_HW_2_0_FW_DIR          ATH10K_FW_DIR "/QCA99X0/hw2.0"
+#define QCA99X0_HW_2_0_FW_FILE         "firmware.bin"
+#define QCA99X0_HW_2_0_OTP_FILE        "otp.bin"
+#define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin"
+#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234
+
 #define ATH10K_FW_API2_FILE            "firmware-2.bin"
 #define ATH10K_FW_API3_FILE            "firmware-3.bin"
 
@@ -112,6 +124,9 @@ enum ath10k_fw_ie_type {
         * FW API 5 and above.
         */
        ATH10K_FW_IE_HTT_OP_VERSION = 6,
+
+       /* Code swap image for firmware binary */
+       ATH10K_FW_IE_FW_CODE_SWAP_IMAGE = 7,
 };
 
 enum ath10k_fw_wmi_op_version {
@@ -122,6 +137,7 @@ enum ath10k_fw_wmi_op_version {
        ATH10K_FW_WMI_OP_VERSION_10_2 = 3,
        ATH10K_FW_WMI_OP_VERSION_TLV = 4,
        ATH10K_FW_WMI_OP_VERSION_10_2_4 = 5,
+       ATH10K_FW_WMI_OP_VERSION_10_4 = 6,
 
        /* keep last */
        ATH10K_FW_WMI_OP_VERSION_MAX,
@@ -137,6 +153,8 @@ enum ath10k_fw_htt_op_version {
 
        ATH10K_FW_HTT_OP_VERSION_TLV = 3,
 
+       ATH10K_FW_HTT_OP_VERSION_10_4 = 4,
+
        /* keep last */
        ATH10K_FW_HTT_OP_VERSION_MAX,
 };
@@ -144,6 +162,7 @@ enum ath10k_fw_htt_op_version {
 enum ath10k_hw_rev {
        ATH10K_HW_QCA988X,
        ATH10K_HW_QCA6174,
+       ATH10K_HW_QCA99X0,
 };
 
 struct ath10k_hw_regs {
@@ -164,26 +183,50 @@ struct ath10k_hw_regs {
        u32 soc_reset_control_ce_rst_mask;
        u32 soc_chip_id_address;
        u32 scratch_3_address;
+       u32 fw_indicator_address;
+       u32 pcie_local_base_address;
+       u32 ce_wrap_intr_sum_host_msi_lsb;
+       u32 ce_wrap_intr_sum_host_msi_mask;
+       u32 pcie_intr_fw_mask;
+       u32 pcie_intr_ce_mask_all;
+       u32 pcie_intr_clr_address;
 };
 
 extern const struct ath10k_hw_regs qca988x_regs;
 extern const struct ath10k_hw_regs qca6174_regs;
+extern const struct ath10k_hw_regs qca99x0_regs;
+
+struct ath10k_hw_values {
+       u32 rtc_state_val_on;
+       u8 ce_count;
+       u8 msi_assign_ce_max;
+       u8 num_target_ce_config_wlan;
+       u16 ce_desc_meta_data_mask;
+       u8 ce_desc_meta_data_lsb;
+};
+
+extern const struct ath10k_hw_values qca988x_values;
+extern const struct ath10k_hw_values qca6174_values;
+extern const struct ath10k_hw_values qca99x0_values;
 
 void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
                                u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
 
 #define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
 #define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
+#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
 
 /* Known peculiarities:
- *  - current FW doesn't support raw rx mode (last tested v599)
- *  - current FW dumps upon raw tx mode (last tested v599)
  *  - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
  *  - raw has FCS, nwifi doesn't
  *  - ethernet frames have 802.11 header decapped and parts (base hdr, cipher
  *    param, llc/snap) are aligned to 4-byte boundaries each */
 enum ath10k_hw_txrx_mode {
        ATH10K_HW_TXRX_RAW = 0,
+
+       /* Native Wifi decap mode is used to align IP frames to 4-byte
+        * boundaries and avoid a very expensive re-alignment in mac80211.
+        */
        ATH10K_HW_TXRX_NATIVE_WIFI = 1,
        ATH10K_HW_TXRX_ETHERNET = 2,
 
@@ -245,10 +288,6 @@ enum ath10k_hw_rate_cck {
 #define TARGET_RX_TIMEOUT_LO_PRI               100
 #define TARGET_RX_TIMEOUT_HI_PRI               40
 
-/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and
- * avoid a very expensive re-alignment in mac80211. */
-#define TARGET_RX_DECAP_MODE                   ATH10K_HW_TXRX_NATIVE_WIFI
-
 #define TARGET_SCAN_MAX_PENDING_REQS           4
 #define TARGET_BMISS_OFFLOAD_MAX_VDEV          3
 #define TARGET_ROAM_OFFLOAD_MAX_VDEV           3
@@ -283,7 +322,6 @@ enum ath10k_hw_rate_cck {
 #define TARGET_10X_RX_CHAIN_MASK               (BIT(0) | BIT(1) | BIT(2))
 #define TARGET_10X_RX_TIMEOUT_LO_PRI           100
 #define TARGET_10X_RX_TIMEOUT_HI_PRI           40
-#define TARGET_10X_RX_DECAP_MODE               ATH10K_HW_TXRX_NATIVE_WIFI
 #define TARGET_10X_SCAN_MAX_PENDING_REQS       4
 #define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV      2
 #define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV       2
@@ -310,8 +348,70 @@ enum ath10k_hw_rate_cck {
 #define TARGET_TLV_NUM_MSDU_DESC               (1024 + 32)
 #define TARGET_TLV_NUM_WOW_PATTERNS            22
 
+/* Diagnostic Window */
+#define CE_DIAG_PIPE   7
+
+#define NUM_TARGET_CE_CONFIG_WLAN ar->hw_values->num_target_ce_config_wlan
+
+/* Target specific defines for 10.4 firmware */
+#define TARGET_10_4_NUM_VDEVS                  16
+#define TARGET_10_4_NUM_STATIONS               32
+#define TARGET_10_4_NUM_PEERS                  ((TARGET_10_4_NUM_STATIONS) + \
+                                                (TARGET_10_4_NUM_VDEVS))
+#define TARGET_10_4_ACTIVE_PEERS               0
+
+#define TARGET_10_4_NUM_QCACHE_PEERS_MAX       512
+#define TARGET_10_4_QCACHE_ACTIVE_PEERS                50
+#define TARGET_10_4_NUM_OFFLOAD_PEERS          0
+#define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS  0
+#define TARGET_10_4_NUM_PEER_KEYS              2
+#define TARGET_10_4_TGT_NUM_TIDS               ((TARGET_10_4_NUM_PEERS) * 2)
+#define TARGET_10_4_AST_SKID_LIMIT             32
+#define TARGET_10_4_TX_CHAIN_MASK              (BIT(0) | BIT(1) | \
+                                                BIT(2) | BIT(3))
+#define TARGET_10_4_RX_CHAIN_MASK              (BIT(0) | BIT(1) | \
+                                                BIT(2) | BIT(3))
+
+/* 100 ms for video, best-effort, and background */
+#define TARGET_10_4_RX_TIMEOUT_LO_PRI          100
+
+/* 40 ms for voice */
+#define TARGET_10_4_RX_TIMEOUT_HI_PRI          40
+
+#define TARGET_10_4_RX_DECAP_MODE              ATH10K_HW_TXRX_NATIVE_WIFI
+#define TARGET_10_4_SCAN_MAX_REQS              4
+#define TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV     3
+#define TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV      3
+#define TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES   8
+
+/* Note: mcast to ucast is disabled by default */
+#define TARGET_10_4_NUM_MCAST_GROUPS           0
+#define TARGET_10_4_NUM_MCAST_TABLE_ELEMS      0
+#define TARGET_10_4_MCAST2UCAST_MODE           0
+
+#define TARGET_10_4_TX_DBG_LOG_SIZE            1024
+#define TARGET_10_4_NUM_WDS_ENTRIES            32
+#define TARGET_10_4_DMA_BURST_SIZE             1
+#define TARGET_10_4_MAC_AGGR_DELIM             0
+#define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_10_4_VOW_CONFIG                 0
+#define TARGET_10_4_GTK_OFFLOAD_MAX_VDEV       3
+#define TARGET_10_4_NUM_MSDU_DESC              (1024 + 400)
+#define TARGET_10_4_11AC_TX_MAX_FRAGS          2
+#define TARGET_10_4_MAX_PEER_EXT_STATS         16
+#define TARGET_10_4_SMART_ANT_CAP              0
+#define TARGET_10_4_BK_MIN_FREE                        0
+#define TARGET_10_4_BE_MIN_FREE                        0
+#define TARGET_10_4_VI_MIN_FREE                        0
+#define TARGET_10_4_VO_MIN_FREE                        0
+#define TARGET_10_4_RX_BATCH_MODE              1
+#define TARGET_10_4_THERMAL_THROTTLING_CONFIG  0
+#define TARGET_10_4_ATF_CONFIG                 0
+#define TARGET_10_4_IPHDR_PAD_CONFIG           1
+#define TARGET_10_4_QWRAP_CONFIG               0
+
 /* Number of Copy Engines supported */
-#define CE_COUNT 8
+#define CE_COUNT ar->hw_values->ce_count
 
 /*
  * Total number of PCIe MSI interrupts requested for all interrupt sources.
@@ -335,10 +435,10 @@ enum ath10k_hw_rate_cck {
 
 /* MSIs for Copy Engines */
 #define MSI_ASSIGN_CE_INITIAL  1
-#define MSI_ASSIGN_CE_MAX      7
+#define MSI_ASSIGN_CE_MAX      ar->hw_values->msi_assign_ce_max
 
 /* as of IP3.7.1 */
-#define RTC_STATE_V_ON                         3
+#define RTC_STATE_V_ON                         ar->hw_values->rtc_state_val_on
 
 #define RTC_STATE_COLD_RESET_MASK              ar->regs->rtc_state_cold_reset_mask
 #define RTC_STATE_V_LSB                                0
@@ -374,7 +474,7 @@ enum ath10k_hw_rate_cck {
 #define CE7_BASE_ADDRESS                       ar->regs->ce7_base_address
 #define DBI_BASE_ADDRESS                       0x00060000
 #define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS     0x0006c000
-#define PCIE_LOCAL_BASE_ADDRESS                        0x00080000
+#define PCIE_LOCAL_BASE_ADDRESS                ar->regs->pcie_local_base_address
 
 #define SOC_RESET_CONTROL_ADDRESS              0x00000000
 #define SOC_RESET_CONTROL_OFFSET               0x00000000
@@ -448,7 +548,7 @@ enum ath10k_hw_rate_cck {
 #define CORE_CTRL_ADDRESS                      0x0000
 #define PCIE_INTR_ENABLE_ADDRESS               0x0008
 #define PCIE_INTR_CAUSE_ADDRESS                        0x000c
-#define PCIE_INTR_CLR_ADDRESS                  0x0014
+#define PCIE_INTR_CLR_ADDRESS                  ar->regs->pcie_intr_clr_address
 #define SCRATCH_3_ADDRESS                      ar->regs->scratch_3_address
 #define CPU_INTR_ADDRESS                       0x0010
 
@@ -456,16 +556,18 @@ enum ath10k_hw_rate_cck {
 #define CCNT_TO_MSEC(x) ((x) / 88000)
 
 /* Firmware indications to the Host via SCRATCH_3 register. */
-#define FW_INDICATOR_ADDRESS   (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS)
+#define FW_INDICATOR_ADDRESS                   ar->regs->fw_indicator_address
 #define FW_IND_EVENT_PENDING                   1
 #define FW_IND_INITIALIZED                     2
 
 /* HOST_REG interrupt from firmware */
-#define PCIE_INTR_FIRMWARE_MASK                        0x00000400
-#define PCIE_INTR_CE_MASK_ALL                  0x0007f800
+#define PCIE_INTR_FIRMWARE_MASK                        ar->regs->pcie_intr_fw_mask
+#define PCIE_INTR_CE_MASK_ALL                  ar->regs->pcie_intr_ce_mask_all
 
 #define DRAM_BASE_ADDRESS                      0x00400000
 
+#define PCIE_BAR_REG_ADDRESS                   0x40030
+
 #define MISSING 0
 
 #define SYSTEM_SLEEP_OFFSET                    SOC_SYSTEM_SLEEP_OFFSET
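
Several of these constants (CE_COUNT, MSI_ASSIGN_CE_MAX, RTC_STATE_V_ON, FW_INDICATOR_ADDRESS, ...) now expand to a lookup through a variable named ar, which means they resolve per-chip at run time but only compile inside functions that have an ar in scope. That keeps the many existing call sites textually unchanged. A small sketch of the idiom:

#include <stdio.h>

struct hw_values { int ce_count; };
struct dev { const struct hw_values *hw_values; };

/* Expands through a local variable called "ar", exactly like the
 * defines above; usable only where such a variable exists. */
#define CE_COUNT ar->hw_values->ce_count

static void list_copy_engines(const struct dev *ar)
{
        for (int i = 0; i < CE_COUNT; i++)
                printf("CE%d\n", i);
}

int main(void)
{
        static const struct hw_values qca99x0ish = { .ce_count = 12 };
        struct dev d = { .hw_values = &qca99x0ish };

        list_copy_engines(&d);
        return 0;
}
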
index 218b6af63447458fa81960479afeeecf9a1520e9..96f4285e93b8dd9d34d2e47d2e5975f9c040d1e9 100644 (file)
@@ -197,6 +197,10 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
                return -EOPNOTSUPP;
        }
 
+       if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+               key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+       }
+
        if (cmd == DISABLE_KEY) {
                arg.key_cipher = WMI_CIPHER_NONE;
                arg.key_data = NULL;
@@ -218,6 +222,9 @@ static int ath10k_install_key(struct ath10k_vif *arvif,
 
        reinit_completion(&ar->install_key_done);
 
+       if (arvif->nohwcrypt)
+               return 1;
+
        ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
        if (ret)
                return ret;
@@ -256,7 +263,7 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
 
                ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
                                         addr, flags);
-               if (ret)
+               if (ret < 0)
                        return ret;
 
                flags = 0;
@@ -264,7 +271,7 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
 
                ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
                                         addr, flags);
-               if (ret)
+               if (ret < 0)
                        return ret;
 
                spin_lock_bh(&ar->data_lock);
@@ -322,10 +329,10 @@ static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
                /* key flags are not required to delete the key */
                ret = ath10k_install_key(arvif, peer->keys[i],
                                         DISABLE_KEY, addr, flags);
-               if (ret && first_errno == 0)
+               if (ret < 0 && first_errno == 0)
                        first_errno = ret;
 
-               if (ret)
+               if (ret < 0)
                        ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
                                    i, ret);
 
@@ -398,7 +405,7 @@ static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
                        break;
                /* key flags are not required to delete the key */
                ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
-               if (ret && first_errno == 0)
+               if (ret < 0 && first_errno == 0)
                        first_errno = ret;
 
                if (ret)
@@ -591,11 +598,19 @@ ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
 static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
                              enum wmi_peer_type peer_type)
 {
+       struct ath10k_vif *arvif;
+       int num_peers = 0;
        int ret;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       if (ar->num_peers >= ar->max_num_peers)
+       num_peers = ar->num_peers;
+
+       /* Each vdev consumes a peer entry as well */
+       list_for_each_entry(arvif, &ar->arvifs, list)
+               num_peers++;
+
+       if (num_peers >= ar->max_num_peers)
                return -ENOBUFS;
 
        ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
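
The peer-create check above now charges each vdev against the firmware's peer budget, since creating a vdev consumes a peer entry too. The arithmetic is simple enough to show directly (all numbers hypothetical):

#include <stdio.h>

int main(void)
{
        int num_peers = 30;     /* ar->num_peers */
        int num_vdevs = 3;      /* entries on ar->arvifs */
        int max_num_peers = 33; /* firmware limit */

        /* Same check as above: vdevs count toward the peer budget. */
        if (num_peers + num_vdevs >= max_num_peers)
                printf("peer create refused: %d + %d >= %d (ENOBUFS)\n",
                       num_peers, num_vdevs, max_num_peers);
        return 0;
}
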
@@ -671,20 +686,6 @@ static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
        return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
 }
 
-static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)
-{
-       struct ath10k *ar = arvif->ar;
-       u32 vdev_param;
-
-       if (value != 0xFFFFFFFF)
-               value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,
-                               ATH10K_FRAGMT_THRESHOLD_MIN,
-                               ATH10K_FRAGMT_THRESHOLD_MAX);
-
-       vdev_param = ar->wmi.vdev_param->fragmentation_threshold;
-       return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
-}
-
 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
 {
        int ret;
@@ -836,7 +837,7 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
 static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
 {
        struct cfg80211_chan_def *chandef = NULL;
-       struct ieee80211_channel *channel = chandef->chan;
+       struct ieee80211_channel *channel = NULL;
        struct wmi_vdev_start_request_arg arg = {};
        int ret = 0;
 
@@ -1668,7 +1669,7 @@ static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
        return 0;
 }
 
-static int ath10k_mac_ps_vif_count(struct ath10k *ar)
+static int ath10k_mac_num_vifs_started(struct ath10k *ar)
 {
        struct ath10k_vif *arvif;
        int num = 0;
@@ -1676,7 +1677,7 @@ static int ath10k_mac_ps_vif_count(struct ath10k *ar)
        lockdep_assert_held(&ar->conf_mutex);
 
        list_for_each_entry(arvif, &ar->arvifs, list)
-               if (arvif->ps)
+               if (arvif->is_started)
                        num++;
 
        return num;
@@ -1700,7 +1701,7 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
 
        enable_ps = arvif->ps;
 
-       if (enable_ps && ath10k_mac_ps_vif_count(ar) > 1 &&
+       if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
            !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
                      ar->fw_features)) {
                ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
@@ -2502,6 +2503,9 @@ static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
        u32 param;
        u32 value;
 
+       if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
+               return 0;
+
        if (!(ar->vht_cap_info &
              (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
               IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
@@ -3034,38 +3038,16 @@ static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
 
        lockdep_assert_held(&ar->htt.tx_lock);
 
-       switch (pause_id) {
-       case WMI_TLV_TX_PAUSE_ID_MCC:
-       case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
-       case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
-       case WMI_TLV_TX_PAUSE_ID_AP_PS:
-       case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
-               switch (action) {
-               case WMI_TLV_TX_PAUSE_ACTION_STOP:
-                       ath10k_mac_vif_tx_lock(arvif, pause_id);
-                       break;
-               case WMI_TLV_TX_PAUSE_ACTION_WAKE:
-                       ath10k_mac_vif_tx_unlock(arvif, pause_id);
-                       break;
-               default:
-                       ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
-                                   action, arvif->vdev_id);
-                       break;
-               }
+       switch (action) {
+       case WMI_TLV_TX_PAUSE_ACTION_STOP:
+               ath10k_mac_vif_tx_lock(arvif, pause_id);
+               break;
+       case WMI_TLV_TX_PAUSE_ACTION_WAKE:
+               ath10k_mac_vif_tx_unlock(arvif, pause_id);
                break;
-       case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
-       case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
-       case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
-       case WMI_TLV_TX_PAUSE_ID_HOST:
        default:
-               /* FIXME: Some pause_ids aren't vdev specific. Instead they
-                * target peer_id and tid. Implementing these could improve
-                * traffic scheduling fairness across multiple connected
-                * stations in AP/IBSS modes.
-                */
-               ath10k_dbg(ar, ATH10K_DBG_MAC,
-                          "mac ignoring unsupported tx pause vdev %i id %d\n",
-                          arvif->vdev_id, pause_id);
+               ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
+                           action, arvif->vdev_id);
                break;
        }
 }
@@ -3082,12 +3064,15 @@ static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
        struct ath10k_mac_tx_pause *arg = data;
 
+       if (arvif->vdev_id != arg->vdev_id)
+               return;
+
        ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
 }
 
-void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
-                               enum wmi_tlv_tx_pause_id pause_id,
-                               enum wmi_tlv_tx_pause_action action)
+void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
+                                    enum wmi_tlv_tx_pause_id pause_id,
+                                    enum wmi_tlv_tx_pause_action action)
 {
        struct ath10k_mac_tx_pause arg = {
                .vdev_id = vdev_id,
@@ -3168,13 +3153,30 @@ ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif,
         * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
         * NativeWifi txmode - it selects AP key instead of peer key. It seems
         * to work with Ethernet txmode so use it.
+        *
+        * FIXME: Check if raw mode works with TDLS.
         */
        if (ieee80211_is_data_present(fc) && sta && sta->tdls)
                return ATH10K_HW_TXRX_ETHERNET;
 
+       if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+               return ATH10K_HW_TXRX_RAW;
+
        return ATH10K_HW_TXRX_NATIVE_WIFI;
 }
 
+static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
+                                    struct sk_buff *skb) {
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
+                        IEEE80211_TX_CTL_INJECTED;
+       if ((info->flags & mask) == mask)
+               return false;
+       if (vif)
+               return !ath10k_vif_to_arvif(vif)->nohwcrypt;
+       return true;
+}
+
 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
  * Control in the header.
  */
@@ -3341,6 +3343,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
        int vdev_id;
        int ret;
        unsigned long time_left;
+       bool tmp_peer_created = false;
 
        /* FW requirement: We must create a peer before FW will send out
         * an offchannel frame. Otherwise the frame will be stuck and
@@ -3378,6 +3381,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
                        if (ret)
                                ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
                                            peer_addr, vdev_id, ret);
+                       tmp_peer_created = (ret == 0);
                }
 
                spin_lock_bh(&ar->data_lock);
@@ -3393,7 +3397,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
                        ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
                                    skb);
 
-               if (!peer) {
+               if (!peer && tmp_peer_created) {
                        ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
                        if (ret)
                                ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
@@ -3449,14 +3453,13 @@ void __ath10k_scan_finish(struct ath10k *ar)
        case ATH10K_SCAN_IDLE:
                break;
        case ATH10K_SCAN_RUNNING:
-               if (ar->scan.is_roc)
-                       ieee80211_remain_on_channel_expired(ar->hw);
-               /* fall through */
        case ATH10K_SCAN_ABORTING:
                if (!ar->scan.is_roc)
                        ieee80211_scan_completed(ar->hw,
                                                 (ar->scan.state ==
                                                  ATH10K_SCAN_ABORTING));
+               else if (ar->scan.roc_notify)
+                       ieee80211_remain_on_channel_expired(ar->hw);
                /* fall through */
        case ATH10K_SCAN_STARTING:
                ar->scan.state = ATH10K_SCAN_IDLE;
@@ -3620,6 +3623,7 @@ static void ath10k_tx(struct ieee80211_hw *hw,
        ATH10K_SKB_CB(skb)->htt.is_offchan = false;
        ATH10K_SKB_CB(skb)->htt.freq = 0;
        ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
+       ATH10K_SKB_CB(skb)->htt.nohwcrypt = !ath10k_tx_h_use_hwcrypto(vif, skb);
        ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif);
        ATH10K_SKB_CB(skb)->txmode = ath10k_tx_h_get_txmode(ar, vif, sta, skb);
        ATH10K_SKB_CB(skb)->is_protected = ieee80211_has_protected(fc);
@@ -3635,12 +3639,11 @@ static void ath10k_tx(struct ieee80211_hw *hw,
                ath10k_tx_h_8023(skb);
                break;
        case ATH10K_HW_TXRX_RAW:
-               /* FIXME: Packet injection isn't implemented. It should be
-                * doable with firmware 10.2 on qca988x.
-                */
-               WARN_ON_ONCE(1);
-               ieee80211_free_txskb(hw, skb);
-               return;
+               if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+                       WARN_ON_ONCE(1);
+                       ieee80211_free_txskb(hw, skb);
+                       return;
+               }
        }
 
        if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
@@ -4039,6 +4042,43 @@ static u32 get_nss_from_chainmask(u16 chain_mask)
        return 1;
 }
 
+static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
+{
+       u32 value = 0;
+       struct ath10k *ar = arvif->ar;
+
+       if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
+               return 0;
+
+       if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+                               IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
+               value |= SM((ar->num_rf_chains - 1), WMI_TXBF_STS_CAP_OFFSET);
+
+       if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+                               IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
+               value |= SM((ar->num_rf_chains - 1), WMI_BF_SOUND_DIM_OFFSET);
+
+       if (!value)
+               return 0;
+
+       if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
+               value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+       if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
+               value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
+                         WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
+
+       if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
+               value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+       if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
+               value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
+                         WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
+
+       return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+                                        ar->wmi.vdev_param->txbf, value);
+}
+
 /*
  * TODO:
  * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
@@ -4080,6 +4120,11 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                       sizeof(arvif->bitrate_mask.control[i].vht_mcs));
        }
 
+       if (ar->num_peers >= ar->max_num_peers) {
+               ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
+               return -ENOBUFS;
+       }
+
        if (ar->free_vdev_map == 0) {
                ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
                ret = -EBUSY;
@@ -4159,6 +4204,14 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                        goto err;
                }
        }
+       if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
+               arvif->nohwcrypt = true;
+
+       if (arvif->nohwcrypt &&
+           !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+               ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
+               goto err;
+       }
 
        ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
                   arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
@@ -4257,16 +4310,16 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                }
        }
 
-       ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
+       ret = ath10k_mac_set_txbf_conf(arvif);
        if (ret) {
-               ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
+               ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
                            arvif->vdev_id, ret);
                goto err_peer_delete;
        }
 
-       ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
+       ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
        if (ret) {
-               ath10k_warn(ar, "failed to set frag threshold for vdev %d: %d\n",
+               ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
                            arvif->vdev_id, ret);
                goto err_peer_delete;
        }
@@ -4641,9 +4694,6 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
        arg.vdev_id = arvif->vdev_id;
        arg.scan_id = ATH10K_SCAN_ID;
 
-       if (!req->no_cck)
-               arg.scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;
-
        if (req->ie_len) {
                arg.ie_len = req->ie_len;
                memcpy(arg.ie, req->ie, arg.ie_len);
@@ -4751,6 +4801,9 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
        if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
                return 1;
 
+       if (arvif->nohwcrypt)
+               return 1;
+
        if (key->keyidx > WMI_MAX_KEY_INDEX)
                return -ENOSPC;
 
@@ -4820,6 +4873,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
        ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
        if (ret) {
+               WARN_ON(ret > 0);
                ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
                            arvif->vdev_id, peer_addr, ret);
                goto exit;
@@ -4835,13 +4889,16 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
                ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
                if (ret) {
+                       WARN_ON(ret > 0);
                        ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
                                    arvif->vdev_id, peer_addr, ret);
                        ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
                                                  peer_addr, flags);
-                       if (ret2)
+                       if (ret2) {
+                               WARN_ON(ret2 > 0);
                                ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
                                            arvif->vdev_id, peer_addr, ret2);
+                       }
                        goto exit;
                }
        }
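
With software crypto in the picture, ath10k_install_key gains a third outcome: besides 0 (installed) and a negative errno, it can return 1 to mean the key was deliberately left to mac80211. That is why the WEP and clear-key callers above now test ret < 0 instead of ret, and why set_key adds WARN_ON(ret > 0) on paths where a skip should be impossible. A sketch of the convention:

#include <stdio.h>

/* Tri-state return, mirroring ath10k_install_key(): <0 hard failure,
 * 0 installed, >0 intentionally skipped (software crypto). */
static int install_key(int nohwcrypt)
{
        if (nohwcrypt)
                return 1;
        return 0;       /* a real failure would be a negative errno */
}

int main(void)
{
        int ret = install_key(1);

        if (ret < 0)
                fprintf(stderr, "failed to install key: %d\n", ret);
        else if (ret > 0)
                printf("key left to software crypto\n");
        else
                printf("key installed in hardware\n");
        return 0;
}
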
@@ -5462,6 +5519,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
                ar->scan.is_roc = true;
                ar->scan.vdev_id = arvif->vdev_id;
                ar->scan.roc_freq = chan->center_freq;
+               ar->scan.roc_notify = true;
                ret = 0;
                break;
        case ATH10K_SCAN_STARTING:
@@ -5525,7 +5583,13 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
        struct ath10k *ar = hw->priv;
 
        mutex_lock(&ar->conf_mutex);
+
+       spin_lock_bh(&ar->data_lock);
+       ar->scan.roc_notify = false;
+       spin_unlock_bh(&ar->data_lock);
+
        ath10k_scan_abort(ar);
+
        mutex_unlock(&ar->conf_mutex);
 
        cancel_delayed_work_sync(&ar->scan.timeout);
@@ -5561,12 +5625,27 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
        return ret;
 }
 
+static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+{
+       /* Even though there's a WMI enum for the fragmentation threshold, no
+        * known firmware actually implements it. Moreover it is not possible
+        * to leave frame fragmentation to mac80211 because the firmware
+        * clears the "more fragments" bit in frame control, making it
+        * impossible for remote devices to reassemble frames.
+        *
+        * Hence implement a dummy callback just to say fragmentation isn't
+        * supported. This effectively prevents mac80211 from doing frame
+        * fragmentation in software.
+        */
+       return -EOPNOTSUPP;
+}
+
 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                         u32 queues, bool drop)
 {
        struct ath10k *ar = hw->priv;
        bool skip;
-       int ret;
+       long time_left;
 
 	/* mac80211 doesn't care if we really xmit queued frames or not;
 	 * we'll collect those frames either way if we stop/delete vdevs */
@@ -5578,7 +5657,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        if (ar->state == ATH10K_STATE_WEDGED)
                goto skip;
 
-       ret = wait_event_timeout(ar->htt.empty_tx_wq, ({
+       time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
                        bool empty;
 
                        spin_lock_bh(&ar->htt.tx_lock);
@@ -5592,9 +5671,9 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                        (empty || skip);
                }), ATH10K_FLUSH_TIMEOUT_HZ);
 
-       if (ret <= 0 || skip)
-               ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %i\n",
-                           skip, ar->state, ret);
+       if (time_left == 0 || skip)
+               ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
+                           skip, ar->state, time_left);
 
 skip:
        mutex_unlock(&ar->conf_mutex);
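
The flush fix above is about wait_event_timeout()'s return contract: it yields the remaining time in jiffies as a long, with 0 meaning the condition never became true, so storing it in an int called ret and printing it like an errno was misleading. A model of the corrected usage:

#include <stdio.h>

/* Stand-in with wait_event_timeout()-like semantics: returns remaining
 * "jiffies" on success, 0 on timeout. */
static long wait_for(int condition, long timeout)
{
        return condition ? timeout / 2 : 0;
}

int main(void)
{
        long time_left = wait_for(0, 5 * 100);  /* 5s at a pretend HZ=100 */

        if (time_left == 0)
                printf("failed to flush transmit queue: %ld\n", time_left);
        else
                printf("flushed with %ld jiffies to spare\n", time_left);
        return 0;
}
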
@@ -6219,6 +6298,13 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
 
        arvif->is_started = true;
 
+       ret = ath10k_mac_vif_setup_ps(arvif);
+       if (ret) {
+               ath10k_warn(ar, "failed to update vdev %i ps: %d\n",
+                           arvif->vdev_id, ret);
+               goto err_stop;
+       }
+
        if (vif->type == NL80211_IFTYPE_MONITOR) {
                ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
                if (ret) {
@@ -6236,6 +6322,7 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
 err_stop:
        ath10k_vdev_stop(arvif);
        arvif->is_started = false;
+       ath10k_mac_vif_setup_ps(arvif);
 
 err:
        mutex_unlock(&ar->conf_mutex);
@@ -6395,6 +6482,7 @@ static const struct ieee80211_ops ath10k_ops = {
        .remain_on_channel              = ath10k_remain_on_channel,
        .cancel_remain_on_channel       = ath10k_cancel_remain_on_channel,
        .set_rts_threshold              = ath10k_set_rts_threshold,
+       .set_frag_threshold             = ath10k_mac_op_set_frag_threshold,
        .flush                          = ath10k_flush,
        .tx_last_beacon                 = ath10k_tx_last_beacon,
        .set_antenna                    = ath10k_set_antenna,
@@ -6565,8 +6653,11 @@ static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
 static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
        {
                .max = 2,
-               .types = BIT(NL80211_IFTYPE_STATION) |
-                        BIT(NL80211_IFTYPE_AP) |
+               .types = BIT(NL80211_IFTYPE_STATION),
+       },
+       {
+               .max = 2,
+               .types = BIT(NL80211_IFTYPE_AP) |
                         BIT(NL80211_IFTYPE_P2P_CLIENT) |
                         BIT(NL80211_IFTYPE_P2P_GO),
        },
@@ -6576,6 +6667,26 @@ static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
        },
 };
 
+static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
+       {
+               .max = 2,
+               .types = BIT(NL80211_IFTYPE_STATION),
+       },
+       {
+               .max = 2,
+               .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_AP) |
+                        BIT(NL80211_IFTYPE_P2P_GO),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+       },
+};
+
 static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
        {
                .max = 1,
@@ -6594,7 +6705,7 @@ static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
        {
                .limits = ath10k_tlv_if_limit,
                .num_different_channels = 1,
-               .max_interfaces = 3,
+               .max_interfaces = 4,
                .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
        },
        {
@@ -6608,10 +6719,16 @@ static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
 static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
        {
                .limits = ath10k_tlv_if_limit,
-               .num_different_channels = 2,
-               .max_interfaces = 3,
+               .num_different_channels = 1,
+               .max_interfaces = 4,
                .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
        },
+       {
+               .limits = ath10k_tlv_qcs_if_limit,
+               .num_different_channels = 2,
+               .max_interfaces = 4,
+               .n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
+       },
        {
                .limits = ath10k_tlv_if_limit_ibss,
                .num_different_channels = 1,
@@ -6620,6 +6737,33 @@ static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
        },
 };
 
+static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_STATION),
+       },
+       {
+               .max    = 16,
+               .types  = BIT(NL80211_IFTYPE_AP)
+       },
+};
+
+static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
+       {
+               .limits = ath10k_10_4_if_limits,
+               .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
+               .max_interfaces = 16,
+               .num_different_channels = 1,
+               .beacon_int_infra_match = true,
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+               .radar_detect_widths =  BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+                                       BIT(NL80211_CHAN_WIDTH_20) |
+                                       BIT(NL80211_CHAN_WIDTH_40) |
+                                       BIT(NL80211_CHAN_WIDTH_80),
+#endif
+       },
+};
+
 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
 {
        struct ieee80211_sta_vht_cap vht_cap = {0};
@@ -6844,7 +6988,6 @@ int ath10k_mac_register(struct ath10k *ar)
        ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
        ieee80211_hw_set(ar->hw, AP_LINK_PS);
        ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
-       ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
        ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
        ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
        ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
@@ -6852,6 +6995,9 @@ int ath10k_mac_register(struct ath10k *ar)
        ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
        ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
 
+       if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+               ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
+
        ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
        ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
 
@@ -6902,6 +7048,8 @@ int ath10k_mac_register(struct ath10k *ar)
                goto err_free;
        }
 
+       wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
        /*
 	 * on LL hardware, queues are managed entirely by the FW,
 	 * so we only advertise to mac80211 that we can do the queues thing
@@ -6941,6 +7089,11 @@ int ath10k_mac_register(struct ath10k *ar)
                ar->hw->wiphy->n_iface_combinations =
                        ARRAY_SIZE(ath10k_10x_if_comb);
                break;
+       case ATH10K_FW_WMI_OP_VERSION_10_4:
+               ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
+               ar->hw->wiphy->n_iface_combinations =
+                       ARRAY_SIZE(ath10k_10_4_if_comb);
+               break;
        case ATH10K_FW_WMI_OP_VERSION_UNSET:
        case ATH10K_FW_WMI_OP_VERSION_MAX:
                WARN_ON(1);
@@ -6948,7 +7101,8 @@ int ath10k_mac_register(struct ath10k *ar)
                goto err_free;
        }
 
-       ar->hw->netdev_features = NETIF_F_HW_CSUM;
+       if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+               ar->hw->netdev_features = NETIF_F_HW_CSUM;
 
        if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
                /* Init ath dfs pattern detector */
index b291f063705c3bb816f811aad450e9549aca5dfb..e3cefe4c7cfd04aa4e8c142282330068209bcb68 100644 (file)
@@ -61,9 +61,9 @@ int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
 
 void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id);
-void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
-                               enum wmi_tlv_tx_pause_id pause_id,
-                               enum wmi_tlv_tx_pause_action action);
+void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
+                                    enum wmi_tlv_tx_pause_id pause_id,
+                                    enum wmi_tlv_tx_pause_action action);
 
 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
                             u8 hw_rate);
index ea656e011a96e195d4513e41a482a40bab07df42..f00b251ec9ce92f15275ec51c3ff292da3dd423d 100644 (file)
@@ -59,10 +59,12 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
 
 #define QCA988X_2_0_DEVICE_ID  (0x003c)
 #define QCA6174_2_1_DEVICE_ID  (0x003e)
+#define QCA99X0_2_0_DEVICE_ID  (0x0040)
 
 static const struct pci_device_id ath10k_pci_id_table[] = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
+       { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
        {0}
 };
 
@@ -77,11 +79,12 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
+       { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
 };
 
 static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
 static int ath10k_pci_cold_reset(struct ath10k *ar);
-static int ath10k_pci_warm_reset(struct ath10k *ar);
+static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
 static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
 static int ath10k_pci_init_irq(struct ath10k *ar);
 static int ath10k_pci_deinit_irq(struct ath10k *ar);
@@ -90,6 +93,7 @@ static void ath10k_pci_free_irq(struct ath10k *ar);
 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer);
+static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
 
 static const struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
@@ -155,6 +159,38 @@ static const struct ce_attr host_ce_config_wlan[] = {
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
        },
+
+       /* CE8: target->host pktlog */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 2048,
+               .dest_nentries = 128,
+       },
+
+       /* CE9: target autonomous qcache memcpy */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 0,
+               .dest_nentries = 0,
+       },
+
+       /* CE10: target autonomous hif memcpy */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 0,
+               .dest_nentries = 0,
+       },
+
+       /* CE11: target autonomous hif memcpy */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 0,
+               .dest_nentries = 0,
+       },
 };
 
 /* Target firmware's Copy Engine configuration. */
@@ -232,6 +268,38 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
        },
 
        /* CE7 used only by Host */
+       {
+               .pipenum = __cpu_to_le32(7),
+               .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+               .nentries = __cpu_to_le32(0),
+               .nbytes_max = __cpu_to_le32(0),
+               .flags = __cpu_to_le32(0),
+               .reserved = __cpu_to_le32(0),
+       },
+
+       /* CE8: target->host pktlog */
+       {
+               .pipenum = __cpu_to_le32(8),
+               .pipedir = __cpu_to_le32(PIPEDIR_IN),
+               .nentries = __cpu_to_le32(64),
+               .nbytes_max = __cpu_to_le32(2048),
+               .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+               .reserved = __cpu_to_le32(0),
+       },
+
+       /* CE9: target autonomous qcache memcpy */
+       {
+               .pipenum = __cpu_to_le32(9),
+               .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+               .nentries = __cpu_to_le32(32),
+               .nbytes_max = __cpu_to_le32(2048),
+               .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+               .reserved = __cpu_to_le32(0),
+       },
+
+       /* It is not necessary to send the target wlan configuration for
+        * CE10 & CE11, as these CEs are not actively used in the target.
+        */
 };
 
 /*
@@ -479,6 +547,12 @@ void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;
 
+       if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
+               ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
+                           offset, offset + sizeof(value), ar_pci->mem_len);
+               return;
+       }
+
        ret = ath10k_pci_wake(ar);
        if (ret) {
                ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
@@ -496,6 +570,12 @@ u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
        u32 val;
        int ret;
 
+       if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
+               ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
+                           offset, offset + sizeof(val), ar_pci->mem_len);
+               return 0;
+       }
+
        ret = ath10k_pci_wake(ar);
        if (ret) {
                ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
@@ -678,6 +758,26 @@ static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
        ath10k_pci_rx_post(ar);
 }
 
+static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+       u32 val = 0;
+
+       switch (ar->hw_rev) {
+       case ATH10K_HW_QCA988X:
+       case ATH10K_HW_QCA6174:
+               val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+                                         CORE_CTRL_ADDRESS) &
+                      0x7ff) << 21;
+               break;
+       case ATH10K_HW_QCA99X0:
+               val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+               break;
+       }
+
+       val |= 0x100000 | (addr & 0xfffff);
+       return val;
+}
+
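The helper above builds a CE-space address from chip-specific window bits OR'd with 0x100000 and the low 20 bits of the target CPU address. A worked example of the QCA988X/QCA6174 arm as a standalone sketch, with the CORE_CTRL register value passed in as a parameter instead of being read over PCIe (names are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t targ_cpu_to_ce_addr(uint32_t core_ctrl, uint32_t addr)
{
	uint32_t val = (core_ctrl & 0x7ff) << 21;	/* window bits */

	val |= 0x100000 | (addr & 0xfffff);		/* low 20 bits */
	return val;
}

int main(void)
{
	/* (0x2 << 21) | 0x100000 | (0x40a234 & 0xfffff) = 0x0050a234 */
	printf("0x%08x\n", targ_cpu_to_ce_addr(0x2, 0x40a234));
	return 0;
}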
 /*
  * Diagnostic read/write access is provided for startup/config/debug usage.
  * Caller must guarantee proper alignment, when applicable, and single user
@@ -740,8 +840,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                 * convert it from Target CPU virtual address space
                 * to CE address space
                 */
-               address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
-                                                    address);
+               address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
 
                ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
                                            0);
@@ -899,7 +998,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
         * to
         *    CE address space
         */
-       address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
+       address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
 
        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
@@ -1331,20 +1430,42 @@ static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
 {
        u32 val;
 
-       val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
-       val &= ~CORE_CTRL_PCIE_REG_31_MASK;
-
-       ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
+       switch (ar->hw_rev) {
+       case ATH10K_HW_QCA988X:
+       case ATH10K_HW_QCA6174:
+               val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+                                       CORE_CTRL_ADDRESS);
+               val &= ~CORE_CTRL_PCIE_REG_31_MASK;
+               ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+                                  CORE_CTRL_ADDRESS, val);
+               break;
+       case ATH10K_HW_QCA99X0:
+               /* TODO: Find appropriate register configuration for QCA99X0
+                * to mask irq/MSI.
+                */
+               break;
+       }
 }
 
 static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
 {
        u32 val;
 
-       val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
-       val |= CORE_CTRL_PCIE_REG_31_MASK;
-
-       ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
+       switch (ar->hw_rev) {
+       case ATH10K_HW_QCA988X:
+       case ATH10K_HW_QCA6174:
+               val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+                                       CORE_CTRL_ADDRESS);
+               val |= CORE_CTRL_PCIE_REG_31_MASK;
+               ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+                                  CORE_CTRL_ADDRESS, val);
+               break;
+       case ATH10K_HW_QCA99X0:
+               /* TODO: Find appropriate register configuration for QCA99X0
+                * to unmask irq/MSI.
+                */
+               break;
+       }
 }
 
 static void ath10k_pci_irq_disable(struct ath10k *ar)
@@ -1506,7 +1627,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
         * masked. To prevent the device from asserting the interrupt reset it
         * before proceeding with cleanup.
         */
-       ath10k_pci_warm_reset(ar);
+       ath10k_pci_safe_chip_reset(ar);
 
        ath10k_pci_irq_disable(ar);
        ath10k_pci_irq_sync(ar);
@@ -1687,6 +1808,7 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
 
        switch (ar_pci->pdev->device) {
        case QCA988X_2_0_DEVICE_ID:
+       case QCA99X0_2_0_DEVICE_ID:
                return 1;
        case QCA6174_2_1_DEVICE_ID:
                switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
@@ -1757,7 +1879,8 @@ static int ath10k_pci_init_config(struct ath10k *ar)
 
        ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
                                        target_ce_config_wlan,
-                                       sizeof(target_ce_config_wlan));
+                                       sizeof(struct ce_pipe_config) *
+                                       NUM_TARGET_CE_CONFIG_WLAN);
 
        if (ret != 0) {
                ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
@@ -1871,7 +1994,7 @@ static int ath10k_pci_alloc_pipes(struct ath10k *ar)
                }
 
                /* Last CE is Diagnostic Window */
-               if (i == CE_COUNT - 1) {
+               if (i == CE_DIAG_PIPE) {
                        ar_pci->ce_diag = pipe->ce_hdl;
                        continue;
                }
@@ -2016,6 +2139,18 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
        return 0;
 }
 
+static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
+{
+       if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) {
+               return ath10k_pci_warm_reset(ar);
+       } else if (QCA_REV_99X0(ar)) {
+               ath10k_pci_irq_disable(ar);
+               return ath10k_pci_qca99x0_chip_reset(ar);
+       } else {
+               return -ENOTSUPP;
+       }
+}
+
 static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
 {
        int i, ret;
@@ -2122,12 +2257,38 @@ static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
        return 0;
 }
 
+static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
+{
+       int ret;
+
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
+
+       ret = ath10k_pci_cold_reset(ar);
+       if (ret) {
+               ath10k_warn(ar, "failed to cold reset: %d\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_pci_wait_for_target_init(ar);
+       if (ret) {
+               ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
+                           ret);
+               return ret;
+       }
+
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
+
+       return 0;
+}
+
 static int ath10k_pci_chip_reset(struct ath10k *ar)
 {
        if (QCA_REV_988X(ar))
                return ath10k_pci_qca988x_chip_reset(ar);
        else if (QCA_REV_6174(ar))
                return ath10k_pci_qca6174_chip_reset(ar);
+       else if (QCA_REV_99X0(ar))
+               return ath10k_pci_qca99x0_chip_reset(ar);
        else
                return -ENOTSUPP;
 }
@@ -2602,7 +2763,6 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
 
 static int ath10k_pci_cold_reset(struct ath10k *ar)
 {
-       int i;
        u32 val;
 
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
@@ -2618,23 +2778,18 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
        val |= 1;
        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
 
-       for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
-               if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
-                                         RTC_STATE_COLD_RESET_MASK)
-                       break;
-               msleep(1);
-       }
+       /* After writing SOC_GLOBAL_RESET to put the device into reset,
+        * and again after pulling it back out, PCIe may not be stable
+        * enough for immediate register access, which can cause bus
+        * errors; delay before any further PCIe access to avoid them.
+        */
+       msleep(20);
 
        /* Pull Target, including PCIe, out of RESET. */
        val &= ~1;
        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
 
-       for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
-               if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
-                                           RTC_STATE_COLD_RESET_MASK))
-                       break;
-               msleep(1);
-       }
+       msleep(20);
 
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
 
@@ -2679,6 +2834,7 @@ static int ath10k_pci_claim(struct ath10k *ar)
        pci_set_master(pdev);
 
        /* Arrange for access to Target SoC registers. */
+       ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
        ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
        if (!ar_pci->mem) {
                ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
@@ -2745,6 +2901,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
        case QCA6174_2_1_DEVICE_ID:
                hw_rev = ATH10K_HW_QCA6174;
                break;
+       case QCA99X0_2_0_DEVICE_ID:
+               hw_rev = ATH10K_HW_QCA99X0;
+               break;
        default:
                WARN_ON(1);
                return -ENOTSUPP;
index d7696ddc03c42b2b2622913f9c42674f22f84039..8d364fb8f743eb6f8ed488c88d83b8adf25b6536 100644 (file)
@@ -162,6 +162,7 @@ struct ath10k_pci {
        struct device *dev;
        struct ath10k *ar;
        void __iomem *mem;
+       size_t mem_len;
 
        /*
         * Number of MSI interrupts granted, 0 --> using legacy PCI line
@@ -236,18 +237,6 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
 #define CDC_WAR_MAGIC_STR   0xceef0000
 #define CDC_WAR_DATA_CE     4
 
-/*
- * TODO: Should be a function call specific to each Target-type.
- * This convoluted macro converts from Target CPU Virtual Address Space to CE
- * Address Space. As part of this process, we conservatively fetch the current
- * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
- * for this device; but that's not guaranteed.
- */
-#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr)                 \
-       (((ath10k_pci_read32(ar, (SOC_CORE_BASE_ADDRESS |               \
-         CORE_CTRL_ADDRESS)) & 0x7ff) << 21) |                         \
-        0x100000 | ((addr) & 0xfffff))
-
 /* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
 #define DIAG_ACCESS_CE_TIMEOUT_MS 10
 
index 492b5a5af434ddb67e01fa9c0d4638eb84f3d66e..ca8d16884af1de1500f31b224f1b68c282b9d945 100644 (file)
@@ -422,6 +422,12 @@ struct rx_mpdu_end {
 #define RX_MSDU_START_INFO1_IP_FRAG             (1 << 14)
 #define RX_MSDU_START_INFO1_TCP_ONLY_ACK        (1 << 15)
 
+#define RX_MSDU_START_INFO2_DA_IDX_MASK         0x000007ff
+#define RX_MSDU_START_INFO2_DA_IDX_LSB          0
+#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_MASK 0x00ff0000
+#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_LSB  16
+#define RX_MSDU_START_INFO2_DA_BCAST_MCAST      BIT(11)
+
 /* The decapped header (rx_hdr_status) contains the following:
  *  a) 802.11 header
  *  [padding to 4 bytes]
@@ -449,12 +455,23 @@ enum rx_msdu_decap_format {
        RX_MSDU_DECAP_8023_SNAP_LLC = 3
 };
 
-struct rx_msdu_start {
+struct rx_msdu_start_common {
        __le32 info0; /* %RX_MSDU_START_INFO0_ */
        __le32 flow_id_crc;
        __le32 info1; /* %RX_MSDU_START_INFO1_ */
 } __packed;
 
+struct rx_msdu_start_qca99x0 {
+       __le32 info2; /* %RX_MSDU_START_INFO2_ */
+} __packed;
+
+struct rx_msdu_start {
+       struct rx_msdu_start_common common;
+       union {
+               struct rx_msdu_start_qca99x0 qca99x0;
+       } __packed;
+} __packed;
+
 /*
  * msdu_length
  *             MSDU length in bytes after decapsulation.  This field is
@@ -540,7 +557,7 @@ struct rx_msdu_start {
 #define RX_MSDU_END_INFO0_PRE_DELIM_ERR             (1 << 30)
 #define RX_MSDU_END_INFO0_RESERVED_3B               (1 << 31)
 
-struct rx_msdu_end {
+struct rx_msdu_end_common {
        __le16 ip_hdr_cksum;
        __le16 tcp_hdr_cksum;
        u8 key_id_octet;
@@ -549,6 +566,36 @@ struct rx_msdu_end {
        __le32 info0;
 } __packed;
 
+#define RX_MSDU_END_INFO1_TCP_FLAG_MASK     0x000001ff
+#define RX_MSDU_END_INFO1_TCP_FLAG_LSB      0
+#define RX_MSDU_END_INFO1_L3_HDR_PAD_MASK   0x00001c00
+#define RX_MSDU_END_INFO1_L3_HDR_PAD_LSB    10
+#define RX_MSDU_END_INFO1_WINDOW_SIZE_MASK  0xffff0000
+#define RX_MSDU_END_INFO1_WINDOW_SIZE_LSB   16
+#define RX_MSDU_END_INFO1_IRO_ELIGIBLE      BIT(9)
+
+#define RX_MSDU_END_INFO2_DA_OFFSET_MASK    0x0000003f
+#define RX_MSDU_END_INFO2_DA_OFFSET_LSB     0
+#define RX_MSDU_END_INFO2_SA_OFFSET_MASK    0x00000fc0
+#define RX_MSDU_END_INFO2_SA_OFFSET_LSB     6
+#define RX_MSDU_END_INFO2_TYPE_OFFSET_MASK  0x0003f000
+#define RX_MSDU_END_INFO2_TYPE_OFFSET_LSB   12
+
+struct rx_msdu_end_qca99x0 {
+       __le32 ipv6_crc;
+       __le32 tcp_seq_no;
+       __le32 tcp_ack_no;
+       __le32 info1;
+       __le32 info2;
+} __packed;
+
+struct rx_msdu_end {
+       struct rx_msdu_end_common common;
+       union {
+               struct rx_msdu_end_qca99x0 qca99x0;
+       } __packed;
+} __packed;
+
 /*
  *ip_hdr_chksum
  *             This can include the IP header checksum or the pseudo header
@@ -870,7 +917,11 @@ struct rx_ppdu_start {
 #define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK (1 << 24)
 #define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL (1 << 25)
 
-#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15)
+#define RX_PPDU_END_INFO1_PEER_IDX_MASK       0x1ffc
+#define RX_PPDU_END_INFO1_PEER_IDX_LSB        2
+#define RX_PPDU_END_INFO1_BB_DATA             BIT(0)
+#define RX_PPDU_END_INFO1_PEER_IDX_VALID      BIT(1)
+#define RX_PPDU_END_INFO1_PPDU_DONE           BIT(15)
 
 struct rx_ppdu_end_common {
        __le32 evm_p0;
@@ -891,13 +942,13 @@ struct rx_ppdu_end_common {
        __le32 evm_p15;
        __le32 tsf_timestamp;
        __le32 wb_timestamp;
+} __packed;
+
+struct rx_ppdu_end_qca988x {
        u8 locationing_timestamp;
        u8 phy_err_code;
        __le16 flags; /* %RX_PPDU_END_FLAGS_ */
        __le32 info0; /* %RX_PPDU_END_INFO0_ */
-} __packed;
-
-struct rx_ppdu_end_qca988x {
        __le16 bb_length;
        __le16 info1; /* %RX_PPDU_END_INFO1_ */
 } __packed;
@@ -909,16 +960,126 @@ struct rx_ppdu_end_qca988x {
 #define RX_PPDU_END_RTT_NORMAL_MODE            BIT(31)
 
 struct rx_ppdu_end_qca6174 {
+       u8 locationing_timestamp;
+       u8 phy_err_code;
+       __le16 flags; /* %RX_PPDU_END_FLAGS_ */
+       __le32 info0; /* %RX_PPDU_END_INFO0_ */
        __le32 rtt; /* %RX_PPDU_END_RTT_ */
        __le16 bb_length;
        __le16 info1; /* %RX_PPDU_END_INFO1_ */
 } __packed;
 
+#define RX_PKT_END_INFO0_RX_SUCCESS              BIT(0)
+#define RX_PKT_END_INFO0_ERR_TX_INTERRUPT_RX     BIT(3)
+#define RX_PKT_END_INFO0_ERR_OFDM_POWER_DROP     BIT(4)
+#define RX_PKT_END_INFO0_ERR_OFDM_RESTART        BIT(5)
+#define RX_PKT_END_INFO0_ERR_CCK_POWER_DROP      BIT(6)
+#define RX_PKT_END_INFO0_ERR_CCK_RESTART         BIT(7)
+
+#define RX_LOCATION_INFO_RTT_CORR_VAL_MASK       0x0001ffff
+#define RX_LOCATION_INFO_RTT_CORR_VAL_LSB        0
+#define RX_LOCATION_INFO_FAC_STATUS_MASK         0x000c0000
+#define RX_LOCATION_INFO_FAC_STATUS_LSB          18
+#define RX_LOCATION_INFO_PKT_BW_MASK             0x00700000
+#define RX_LOCATION_INFO_PKT_BW_LSB              20
+#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_MASK 0x01800000
+#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_LSB  23
+#define RX_LOCATION_INFO_CIR_STATUS              BIT(17)
+#define RX_LOCATION_INFO_RTT_MAC_PHY_PHASE       BIT(25)
+#define RX_LOCATION_INFO_RTT_TX_DATA_START_X     BIT(26)
+#define RX_LOCATION_INFO_HW_IFFT_MODE            BIT(30)
+#define RX_LOCATION_INFO_RX_LOCATION_VALID       BIT(31)
+
+struct rx_pkt_end {
+       __le32 info0; /* %RX_PKT_END_INFO0_ */
+       __le32 phy_timestamp_1;
+       __le32 phy_timestamp_2;
+       __le32 rx_location_info; /* %RX_LOCATION_INFO_ */
+} __packed;
+
+enum rx_phy_ppdu_end_info0 {
+       RX_PHY_PPDU_END_INFO0_ERR_RADAR           = BIT(2),
+       RX_PHY_PPDU_END_INFO0_ERR_RX_ABORT        = BIT(3),
+       RX_PHY_PPDU_END_INFO0_ERR_RX_NAP          = BIT(4),
+       RX_PHY_PPDU_END_INFO0_ERR_OFDM_TIMING     = BIT(5),
+       RX_PHY_PPDU_END_INFO0_ERR_OFDM_PARITY     = BIT(6),
+       RX_PHY_PPDU_END_INFO0_ERR_OFDM_RATE       = BIT(7),
+       RX_PHY_PPDU_END_INFO0_ERR_OFDM_LENGTH     = BIT(8),
+       RX_PHY_PPDU_END_INFO0_ERR_OFDM_RESTART    = BIT(9),
+       RX_PHY_PPDU_END_INFO0_ERR_OFDM_SERVICE    = BIT(10),
+       RX_PHY_PPDU_END_INFO0_ERR_OFDM_POWER_DROP = BIT(11),
+       RX_PHY_PPDU_END_INFO0_ERR_CCK_BLOCKER     = BIT(12),
+       RX_PHY_PPDU_END_INFO0_ERR_CCK_TIMING      = BIT(13),
+       RX_PHY_PPDU_END_INFO0_ERR_CCK_HEADER_CRC  = BIT(14),
+       RX_PHY_PPDU_END_INFO0_ERR_CCK_RATE        = BIT(15),
+       RX_PHY_PPDU_END_INFO0_ERR_CCK_LENGTH      = BIT(16),
+       RX_PHY_PPDU_END_INFO0_ERR_CCK_RESTART     = BIT(17),
+       RX_PHY_PPDU_END_INFO0_ERR_CCK_SERVICE     = BIT(18),
+       RX_PHY_PPDU_END_INFO0_ERR_CCK_POWER_DROP  = BIT(19),
+       RX_PHY_PPDU_END_INFO0_ERR_HT_CRC          = BIT(20),
+       RX_PHY_PPDU_END_INFO0_ERR_HT_LENGTH       = BIT(21),
+       RX_PHY_PPDU_END_INFO0_ERR_HT_RATE         = BIT(22),
+       RX_PHY_PPDU_END_INFO0_ERR_HT_ZLF          = BIT(23),
+       RX_PHY_PPDU_END_INFO0_ERR_FALSE_RADAR_EXT = BIT(24),
+       RX_PHY_PPDU_END_INFO0_ERR_GREEN_FIELD     = BIT(25),
+       RX_PHY_PPDU_END_INFO0_ERR_SPECTRAL_SCAN   = BIT(26),
+       RX_PHY_PPDU_END_INFO0_ERR_RX_DYN_BW       = BIT(27),
+       RX_PHY_PPDU_END_INFO0_ERR_LEG_HT_MISMATCH = BIT(28),
+       RX_PHY_PPDU_END_INFO0_ERR_VHT_CRC         = BIT(29),
+       RX_PHY_PPDU_END_INFO0_ERR_VHT_SIGA        = BIT(30),
+       RX_PHY_PPDU_END_INFO0_ERR_VHT_LSIG        = BIT(31),
+};
+
+enum rx_phy_ppdu_end_info1 {
+       RX_PHY_PPDU_END_INFO1_ERR_VHT_NDP            = BIT(0),
+       RX_PHY_PPDU_END_INFO1_ERR_VHT_NSYM           = BIT(1),
+       RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_EXT_SYM     = BIT(2),
+       RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID0    = BIT(3),
+       RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID1_62 = BIT(4),
+       RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID63   = BIT(5),
+       RX_PHY_PPDU_END_INFO1_ERR_OFDM_LDPC_DECODER  = BIT(6),
+       RX_PHY_PPDU_END_INFO1_ERR_DEFER_NAP          = BIT(7),
+       RX_PHY_PPDU_END_INFO1_ERR_FDOMAIN_TIMEOUT    = BIT(8),
+       RX_PHY_PPDU_END_INFO1_ERR_LSIG_REL_CHECK     = BIT(9),
+       RX_PHY_PPDU_END_INFO1_ERR_BT_COLLISION       = BIT(10),
+       RX_PHY_PPDU_END_INFO1_ERR_MU_FEEDBACK        = BIT(11),
+       RX_PHY_PPDU_END_INFO1_ERR_TX_INTERRUPT_RX    = BIT(12),
+       RX_PHY_PPDU_END_INFO1_ERR_RX_CBF             = BIT(13),
+};
+
+struct rx_phy_ppdu_end {
+       __le32 info0; /* %RX_PHY_PPDU_END_INFO0_ */
+       __le32 info1; /* %RX_PHY_PPDU_END_INFO1_ */
+} __packed;
+
+#define RX_PPDU_END_RX_TIMING_OFFSET_MASK          0x00000fff
+#define RX_PPDU_END_RX_TIMING_OFFSET_LSB           0
+
+#define RX_PPDU_END_RX_INFO_RX_ANTENNA_MASK        0x00ffffff
+#define RX_PPDU_END_RX_INFO_RX_ANTENNA_LSB         0
+#define RX_PPDU_END_RX_INFO_TX_HT_VHT_ACK          BIT(24)
+#define RX_PPDU_END_RX_INFO_RX_PKT_END_VALID       BIT(25)
+#define RX_PPDU_END_RX_INFO_RX_PHY_PPDU_END_VALID  BIT(26)
+#define RX_PPDU_END_RX_INFO_RX_TIMING_OFFSET_VALID BIT(27)
+#define RX_PPDU_END_RX_INFO_BB_CAPTURED_CHANNEL    BIT(28)
+#define RX_PPDU_END_RX_INFO_UNSUPPORTED_MU_NC      BIT(29)
+#define RX_PPDU_END_RX_INFO_OTP_TXBF_DISABLE       BIT(30)
+
+struct rx_ppdu_end_qca99x0 {
+       struct rx_pkt_end rx_pkt_end;
+       struct rx_phy_ppdu_end rx_phy_ppdu_end;
+       __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
+       __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
+       __le16 bb_length;
+       __le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
 struct rx_ppdu_end {
        struct rx_ppdu_end_common common;
        union {
                struct rx_ppdu_end_qca988x qca988x;
                struct rx_ppdu_end_qca6174 qca6174;
+               struct rx_ppdu_end_qca99x0 qca99x0;
        } __packed;
 } __packed;
 
diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c
new file mode 100644 (file)
index 0000000..3ca3fae
--- /dev/null
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* This file implements the code swap logic. With the code swap feature,
+ * the target can run the fw binary with a smaller IRAM by using host
+ * memory to store some of the code segments.
+ */
+
+#include "core.h"
+#include "bmi.h"
+#include "debug.h"
+
+static int ath10k_swap_code_seg_fill(struct ath10k *ar,
+                                    struct ath10k_swap_code_seg_info *seg_info,
+                                    const void *data, size_t data_len)
+{
+       u8 *virt_addr = seg_info->virt_address[0];
+       u8 swap_magic[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ] = {};
+       const u8 *fw_data = data;
+       union ath10k_swap_code_seg_item *swap_item;
+       u32 length = 0;
+       u32 payload_len;
+       u32 total_payload_len = 0;
+       u32 size_left = data_len;
+
+       /* Parse the swap bin and copy its content to host-allocated memory.
+        * The format is a sequence of (address, length, value) items; the
+        * last 4 bytes are the target write address. The address field is
+        * currently unused.
+        */
+       seg_info->target_addr = -1;
+       while (size_left >= sizeof(*swap_item)) {
+               swap_item = (union ath10k_swap_code_seg_item *)fw_data;
+               payload_len = __le32_to_cpu(swap_item->tlv.length);
+               if ((payload_len > size_left) ||
+                   (payload_len == 0 &&
+                    size_left != sizeof(struct ath10k_swap_code_seg_tail))) {
+                       ath10k_err(ar, "refusing to parse invalid tlv length %d\n",
+                                  payload_len);
+                       return -EINVAL;
+               }
+
+               if (payload_len == 0) {
+                       if (memcmp(swap_item->tail.magic_signature, swap_magic,
+                                  ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ)) {
+                               ath10k_err(ar, "refusing an invalid swap file\n");
+                               return -EINVAL;
+                       }
+                       seg_info->target_addr =
+                               __le32_to_cpu(swap_item->tail.bmi_write_addr);
+                       break;
+               }
+
+               memcpy(virt_addr, swap_item->tlv.data, payload_len);
+               virt_addr += payload_len;
+               length = payload_len + sizeof(struct ath10k_swap_code_seg_tlv);
+               size_left -= length;
+               fw_data += length;
+               total_payload_len += payload_len;
+       }
+
+       if (seg_info->target_addr == -1) {
+               ath10k_err(ar, "failed to parse invalid swap file\n");
+               return -EINVAL;
+       }
+       seg_info->seg_hw_info.swap_size = __cpu_to_le32(total_payload_len);
+
+       return 0;
+}
+
+static void
+ath10k_swap_code_seg_free(struct ath10k *ar,
+                         struct ath10k_swap_code_seg_info *seg_info)
+{
+       u32 seg_size;
+
+       if (!seg_info)
+               return;
+
+       if (!seg_info->virt_address[0])
+               return;
+
+       seg_size = __le32_to_cpu(seg_info->seg_hw_info.size);
+       dma_free_coherent(ar->dev, seg_size, seg_info->virt_address[0],
+                         seg_info->paddr[0]);
+}
+
+static struct ath10k_swap_code_seg_info *
+ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
+{
+       struct ath10k_swap_code_seg_info *seg_info;
+       void *virt_addr;
+       dma_addr_t paddr;
+
+       swap_bin_len = roundup(swap_bin_len, 2);
+       if (swap_bin_len > ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX) {
+               ath10k_err(ar, "refusing code swap bin because it is too big %zu > %d\n",
+                          swap_bin_len, ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX);
+               return NULL;
+       }
+
+       seg_info = devm_kzalloc(ar->dev, sizeof(*seg_info), GFP_KERNEL);
+       if (!seg_info)
+               return NULL;
+
+       virt_addr = dma_alloc_coherent(ar->dev, swap_bin_len, &paddr,
+                                      GFP_KERNEL);
+       if (!virt_addr) {
+               ath10k_err(ar, "failed to allocate dma coherent memory\n");
+               return NULL;
+       }
+
+       seg_info->seg_hw_info.bus_addr[0] = __cpu_to_le32(paddr);
+       seg_info->seg_hw_info.size = __cpu_to_le32(swap_bin_len);
+       seg_info->seg_hw_info.swap_size = __cpu_to_le32(swap_bin_len);
+       seg_info->seg_hw_info.num_segs =
+                       __cpu_to_le32(ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED);
+       seg_info->seg_hw_info.size_log2 = __cpu_to_le32(ilog2(swap_bin_len));
+       seg_info->virt_address[0] = virt_addr;
+       seg_info->paddr[0] = paddr;
+
+       return seg_info;
+}
+
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+                                  enum ath10k_swap_code_seg_bin_type type)
+{
+       int ret;
+       struct ath10k_swap_code_seg_info *seg_info = NULL;
+
+       switch (type) {
+       case ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW:
+               if (!ar->swap.firmware_swap_code_seg_info)
+                       return 0;
+
+               ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
+               seg_info = ar->swap.firmware_swap_code_seg_info;
+               break;
+       default:
+       case ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP:
+       case ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF:
+               ath10k_warn(ar, "ignoring unknown code swap binary type %d\n",
+                           type);
+               return 0;
+       }
+
+       ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
+                                     &seg_info->seg_hw_info,
+                                     sizeof(seg_info->seg_hw_info));
+       if (ret) {
+               ath10k_err(ar, "failed to write Code swap segment information (%d)\n",
+                          ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+void ath10k_swap_code_seg_release(struct ath10k *ar)
+{
+       ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info);
+       ar->swap.firmware_codeswap_data = NULL;
+       ar->swap.firmware_codeswap_len = 0;
+       ar->swap.firmware_swap_code_seg_info = NULL;
+}
+
+int ath10k_swap_code_seg_init(struct ath10k *ar)
+{
+       int ret;
+       struct ath10k_swap_code_seg_info *seg_info;
+
+       if (!ar->swap.firmware_codeswap_len || !ar->swap.firmware_codeswap_data)
+               return 0;
+
+       seg_info = ath10k_swap_code_seg_alloc(ar,
+                                             ar->swap.firmware_codeswap_len);
+       if (!seg_info) {
+               ath10k_err(ar, "failed to allocate fw code swap segment\n");
+               return -ENOMEM;
+       }
+
+       ret = ath10k_swap_code_seg_fill(ar, seg_info,
+                                       ar->swap.firmware_codeswap_data,
+                                       ar->swap.firmware_codeswap_len);
+
+       if (ret) {
+               ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n",
+                           ret);
+               ath10k_swap_code_seg_free(ar, seg_info);
+               return ret;
+       }
+
+       ar->swap.firmware_swap_code_seg_info = seg_info;
+
+       return 0;
+}
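The fill routine above walks (address, length, value) items until it finds a zero-length item whose all-zero 12-byte magic marks the tail, and the 4 bytes after the magic carry the BMI write address. A self-contained sketch of the same walk over an in-memory buffer; names are hypothetical, a little-endian host is assumed for brevity (the driver uses __le32_to_cpu()), and the driver is stricter in requiring the tail to be the final 16 bytes exactly:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAGIC_SZ 12

struct tlv_hdr {	/* mirrors ath10k_swap_code_seg_tlv */
	uint32_t address;
	uint32_t length;
	/* payload follows */
};

/* Return the tail's write address, or -1 on malformed input. */
static int64_t walk_swap_bin(const uint8_t *p, size_t left)
{
	static const uint8_t magic[MAGIC_SZ];	/* all-zero signature */

	while (left >= sizeof(struct tlv_hdr)) {
		struct tlv_hdr h;

		memcpy(&h, p, sizeof(h));
		if (h.length == 0) {
			uint32_t addr;

			if (left < MAGIC_SZ + 4 || memcmp(p, magic, MAGIC_SZ))
				return -1;
			memcpy(&addr, p + MAGIC_SZ, sizeof(addr));
			return addr;
		}
		if (h.length > left - sizeof(h))
			return -1;
		p += sizeof(h) + h.length;
		left -= sizeof(h) + h.length;
	}

	return -1;
}

int main(void)
{
	/* One 4-byte item, then a 16-byte tail: 12 zero magic bytes
	 * followed by the write address.
	 */
	uint8_t bin[28] = { 0x10, 0, 0, 0,  4, 0, 0, 0,  'A', 'B', 'C', 'D' };
	uint32_t wr = 0x401000;

	memcpy(bin + 24, &wr, sizeof(wr));
	printf("write addr: 0x%llx\n",
	       (unsigned long long)walk_swap_bin(bin, sizeof(bin)));
	return 0;
}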
diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h
new file mode 100644 (file)
index 0000000..5c89952
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SWAP_H_
+#define _SWAP_H_
+
+#define ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX       (512 * 1024)
+#define ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ    12
+#define ATH10K_SWAP_CODE_SEG_NUM_MAX           16
+/* Currently only one swap segment is supported */
+#define ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED     1
+
+struct ath10k_swap_code_seg_tlv {
+       __le32 address;
+       __le32 length;
+       u8 data[0];
+} __packed;
+
+struct ath10k_swap_code_seg_tail {
+       u8 magic_signature[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ];
+       __le32 bmi_write_addr;
+} __packed;
+
+union ath10k_swap_code_seg_item {
+       struct ath10k_swap_code_seg_tlv tlv;
+       struct ath10k_swap_code_seg_tail tail;
+} __packed;
+
+enum ath10k_swap_code_seg_bin_type {
+       ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP,
+       ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW,
+       ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF,
+};
+
+struct ath10k_swap_code_seg_hw_info {
+       /* Swap binary image size */
+       __le32 swap_size;
+       __le32 num_segs;
+
+       /* Swap data size */
+       __le32 size;
+       __le32 size_log2;
+       __le32 bus_addr[ATH10K_SWAP_CODE_SEG_NUM_MAX];
+       __le64 reserved[ATH10K_SWAP_CODE_SEG_NUM_MAX];
+} __packed;
+
+struct ath10k_swap_code_seg_info {
+       struct ath10k_swap_code_seg_hw_info seg_hw_info;
+       void *virt_address[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
+       u32 target_addr;
+       dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
+};
+
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+                                  enum ath10k_swap_code_seg_bin_type type);
+void ath10k_swap_code_seg_release(struct ath10k *ar);
+int ath10k_swap_code_seg_init(struct ath10k *ar);
+
+#endif
index a417aae52623de0e86b951029c0991717eaf39dc..768bef6290995b4e2b4c80c6c3e09e7b3f15893a 100644 (file)
@@ -450,4 +450,7 @@ Fw Mode/SubMode Mask
 #define QCA6174_BOARD_DATA_SZ     8192
 #define QCA6174_BOARD_EXT_DATA_SZ 0
 
+#define QCA99X0_BOARD_DATA_SZ    12288
+#define QCA99X0_BOARD_EXT_DATA_SZ 0
+
 #endif /* __TARGADDRS_H__ */
index 826500bb2b1b247233fbf1998733c2d8c16b3fe7..e4a9c4c8d0cb7e8f7a9a89372e6537d9efc9887e 100644 (file)
@@ -53,8 +53,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
        struct ath10k_skb_cb *skb_cb;
        struct sk_buff *msdu;
 
-       lockdep_assert_held(&htt->tx_lock);
-
        ath10k_dbg(ar, ATH10K_DBG_HTT,
                   "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
                   tx_done->msdu_id, !!tx_done->discard,
@@ -66,12 +64,19 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
                return;
        }
 
+       spin_lock_bh(&htt->tx_lock);
        msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
        if (!msdu) {
                ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
                            tx_done->msdu_id);
+               spin_unlock_bh(&htt->tx_lock);
                return;
        }
+       ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
+       __ath10k_htt_tx_dec_pending(htt);
+       if (htt->num_pending_tx == 0)
+               wake_up(&htt->empty_tx_wq);
+       spin_unlock_bh(&htt->tx_lock);
 
        skb_cb = ATH10K_SKB_CB(msdu);
 
@@ -90,7 +95,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
 
        if (tx_done->discard) {
                ieee80211_free_txskb(htt->ar->hw, msdu);
-               goto exit;
+               return;
        }
 
        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
@@ -104,12 +109,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
 
        ieee80211_tx_status(htt->ar->hw, msdu);
        /* we do not own the msdu anymore */
-
-exit:
-       ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
-       __ath10k_htt_tx_dec_pending(htt);
-       if (htt->num_pending_tx == 0)
-               wake_up(&htt->empty_tx_wq);
 }
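The reordering above turns the id lookup and release into one critical section: the handler now takes tx_lock itself, frees the msdu_id and pending count while still holding it, and only then processes the skb lock-free. A minimal kernel-style sketch of that shape; the helper name is hypothetical, and in the driver the idr bookkeeping is wrapped by ath10k_htt_tx_free_msdu_id():

/* Illustrative only: look up and retire an idr-managed entry in a
 * single critical section, so the id cannot be recycled by a
 * concurrent submitter between the lookup and the free.
 */
static struct sk_buff *retire_msdu(struct ath10k_htt *htt, int msdu_id)
{
	struct sk_buff *msdu;

	spin_lock_bh(&htt->tx_lock);
	msdu = idr_find(&htt->pending_tx, msdu_id);
	if (msdu)
		idr_remove(&htt->pending_tx, msdu_id);
	spin_unlock_bh(&htt->tx_lock);

	return msdu;	/* NULL means a stale or duplicate completion */
}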
 
 struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
@@ -147,9 +146,9 @@ struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
 static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
                                       const u8 *addr, bool expect_mapped)
 {
-       int ret;
+       long time_left;
 
-       ret = wait_event_timeout(ar->peer_mapping_wq, ({
+       time_left = wait_event_timeout(ar->peer_mapping_wq, ({
                        bool mapped;
 
                        spin_lock_bh(&ar->data_lock);
@@ -160,7 +159,7 @@ static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
                         test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
                }), 3*HZ);
 
-       if (ret <= 0)
+       if (time_left == 0)
                return -ETIMEDOUT;
 
        return 0;
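Renaming ret to time_left matches the wait_event_timeout() contract: it returns the remaining jiffies (at least 1) when the condition became true and 0 on timeout, and unlike the _interruptible_ variant it never returns a negative value, so a signed error-style name was misleading. In sketch form, with wq and done as placeholders:

long time_left;

time_left = wait_event_timeout(wq, done, 3 * HZ);
if (!time_left)
	return -ETIMEDOUT;		/* condition never became true */

/* only wait_event_interruptible_timeout() can go negative: */
time_left = wait_event_interruptible_timeout(wq, done, 3 * HZ);
if (time_left < 0)
	return time_left;		/* -ERESTARTSYS on signal */
if (!time_left)
	return -ETIMEDOUT;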
index 47fe2e756becd4ebacabf199ee204f732413e378..2591018c4dc5e1d528b6550a6cc8141d9531f54a 100644 (file)
@@ -49,6 +49,7 @@ struct wmi_ops {
                            struct wmi_roam_ev_arg *arg);
        int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
                              struct wmi_wow_ev_arg *arg);
+       enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
 
        struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
        struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
@@ -319,6 +320,15 @@ ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
        return ar->wmi.ops->pull_wow_event(ar, skb, arg);
 }
 
+static inline enum wmi_txbf_conf
+ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
+{
+       if (!ar->wmi.ops->get_txbf_conf_scheme)
+               return WMI_TXBF_CONF_UNSUPPORTED;
+
+       return ar->wmi.ops->get_txbf_conf_scheme(ar);
+}
+
 static inline int
 ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
 {
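The new get_txbf_conf_scheme wrapper above follows the wmi-ops convention for optional callbacks: a NULL function pointer degrades to a safe default (WMI_TXBF_CONF_UNSUPPORTED) rather than an oops, so only the WMI variants that care need to implement it. The shape of the pattern, reduced to illustrative names:

struct ops {
	int (*get_mode)(void *ctx);	/* optional, may be NULL */
};

/* Illustrative only: fall back to a harmless default when the
 * backend leaves the hook unset.
 */
static int get_mode_or_default(const struct ops *ops, void *ctx)
{
	if (!ops->get_mode)
		return 0;	/* "unsupported" default */

	return ops->get_mode(ctx);
}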
index 8fdba3865c960e699bacf0998fd911d1e3a231b1..357b5a292a892cb29592278120406a5e07396e44 100644 (file)
@@ -377,12 +377,34 @@ static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
                   "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
                   pause_id, action, vdev_map, peer_id, tid_map);
 
-       for (vdev_id = 0; vdev_map; vdev_id++) {
-               if (!(vdev_map & BIT(vdev_id)))
-                       continue;
-
-               vdev_map &= ~BIT(vdev_id);
-               ath10k_mac_handle_tx_pause(ar, vdev_id, pause_id, action);
+       switch (pause_id) {
+       case WMI_TLV_TX_PAUSE_ID_MCC:
+       case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
+       case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
+       case WMI_TLV_TX_PAUSE_ID_AP_PS:
+       case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
+               for (vdev_id = 0; vdev_map; vdev_id++) {
+                       if (!(vdev_map & BIT(vdev_id)))
+                               continue;
+
+                       vdev_map &= ~BIT(vdev_id);
+                       ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id,
+                                                       action);
+               }
+               break;
+       case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
+       case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
+       case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
+       case WMI_TLV_TX_PAUSE_ID_HOST:
+               ath10k_dbg(ar, ATH10K_DBG_MAC,
+                          "mac ignoring unsupported tx pause id %d\n",
+                          pause_id);
+               break;
+       default:
+               ath10k_dbg(ar, ATH10K_DBG_MAC,
+                          "mac ignoring unknown tx pause vdev %d\n",
+                          pause_id);
+               break;
        }
 
        kfree(tb);
@@ -497,7 +519,7 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
                break;
        case WMI_TLV_SERVICE_READY_EVENTID:
                ath10k_wmi_event_service_ready(ar, skb);
-               break;
+               return;
        case WMI_TLV_READY_EVENTID:
                ath10k_wmi_event_ready(ar, skb);
                break;
@@ -709,6 +731,8 @@ static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
                                         const void *ptr, void *data)
 {
        struct wmi_tlv_swba_parse *swba = data;
+       struct wmi_tim_info_arg *tim_info_arg;
+       const struct wmi_tim_info *tim_info_ev = ptr;
 
        if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
                return -EPROTO;
@@ -716,7 +740,21 @@ static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
        if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
                return -ENOBUFS;
 
-       swba->arg->tim_info[swba->n_tim++] = ptr;
+       if (__le32_to_cpu(tim_info_ev->tim_len) >
+            sizeof(tim_info_ev->tim_bitmap)) {
+               ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+               return -EPROTO;
+       }
+
+       tim_info_arg = &swba->arg->tim_info[swba->n_tim];
+       tim_info_arg->tim_len = tim_info_ev->tim_len;
+       tim_info_arg->tim_mcast = tim_info_ev->tim_mcast;
+       tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap;
+       tim_info_arg->tim_changed = tim_info_ev->tim_changed;
+       tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending;
+
+       swba->n_tim++;
+
        return 0;
 }
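The parse above now checks the firmware-supplied tim_len against the fixed-size tim_bitmap before the values are stored, the usual guard for device-controlled lengths. A standalone sketch of the check; the struct layout and names here are illustrative stand-ins, not the wmi definitions:

#include <stdint.h>
#include <string.h>

struct tim_info {
	uint32_t tim_len;	/* device-supplied, untrusted */
	uint8_t tim_bitmap[16];
};

/* Refuse to copy more than the backing array can hold; the driver
 * would convert tim_len with __le32_to_cpu() first.
 */
static int copy_tim(uint8_t *dst, size_t dst_len, const struct tim_info *ti)
{
	uint32_t len = ti->tim_len;

	if (len > sizeof(ti->tim_bitmap) || len > dst_len)
		return -1;
	memcpy(dst, ti->tim_bitmap, len);
	return 0;
}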
 
@@ -1241,6 +1279,11 @@ ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
        return skb;
 }
 
+static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar)
+{
+       return WMI_TXBF_CONF_AFTER_ASSOC;
+}
+
 static struct sk_buff *
 ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
                                     u32 param_value)
@@ -1335,7 +1378,7 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
        cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
        cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
        cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
-       cfg->rx_decap_mode = __cpu_to_le32(1);
+       cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
        cfg->scan_max_pending_reqs = __cpu_to_le32(4);
        cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
        cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
@@ -3151,6 +3194,38 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
        .tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
        .tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
        .adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
+       .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+       .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+       .nan_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+       .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+       .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+       .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
 };
 
 static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
@@ -3204,6 +3279,48 @@ static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
        .burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
        .burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
        .cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
+       .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+       .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+       .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+       .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+       .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+       .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+       .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+       .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+       .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+       .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+       .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+       .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+       .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+       .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+       .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+       .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+       .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+       .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+       .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+       .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+       .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+       .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
 };
 
 static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
@@ -3262,6 +3379,22 @@ static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
        .tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
        .ap_detect_out_of_sync_sleeping_sta_time_secs =
                                        WMI_TLV_VDEV_PARAM_UNSUPPORTED,
+       .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+       .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+       .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+       .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+       .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+       .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+       .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+       .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+       .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+       .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
 };
 
 static const struct wmi_ops wmi_tlv_ops = {
@@ -3280,6 +3413,7 @@ static const struct wmi_ops wmi_tlv_ops = {
        .pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
        .pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
        .pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
+       .get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,
 
        .gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
        .gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
index 6c046c244705fe69f23fa9207113c6cefec9a78e..36b8f7148b5162da910d0f1fc46c8133ceabe1f8 100644 (file)
@@ -148,6 +148,48 @@ static struct wmi_cmd_map wmi_cmd_map = {
        .gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
        .gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
        .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+       .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+       .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+       .nan_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+       .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+       .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+       .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+       .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+       .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
 };
 
 /* 10.X WMI cmd track */
@@ -271,6 +313,48 @@ static struct wmi_cmd_map wmi_10x_cmd_map = {
        .gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
        .gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
        .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+       .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+       .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+       .nan_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+       .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+       .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+       .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+       .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+       .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
 };
 
 /* 10.2.4 WMI cmd track */
@@ -393,6 +477,231 @@ static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
        .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
        .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
        .pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
+       .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+       .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+       .nan_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+       .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+       .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+       .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+       .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+       .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+/* 10.4 WMI cmd track */
+static struct wmi_cmd_map wmi_10_4_cmd_map = {
+       .init_cmdid = WMI_10_4_INIT_CMDID,
+       .start_scan_cmdid = WMI_10_4_START_SCAN_CMDID,
+       .stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID,
+       .scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID,
+       .scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
+       .pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
+       .pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID,
+       .pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID,
+       .pdev_pktlog_enable_cmdid = WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
+       .pdev_pktlog_disable_cmdid = WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
+       .pdev_set_wmm_params_cmdid = WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
+       .pdev_set_ht_cap_ie_cmdid = WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
+       .pdev_set_vht_cap_ie_cmdid = WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
+       .pdev_set_dscp_tid_map_cmdid = WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
+       .pdev_set_quiet_mode_cmdid = WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
+       .pdev_green_ap_ps_enable_cmdid = WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+       .pdev_get_tpc_config_cmdid = WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
+       .pdev_set_base_macaddr_cmdid = WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
+       .vdev_create_cmdid = WMI_10_4_VDEV_CREATE_CMDID,
+       .vdev_delete_cmdid = WMI_10_4_VDEV_DELETE_CMDID,
+       .vdev_start_request_cmdid = WMI_10_4_VDEV_START_REQUEST_CMDID,
+       .vdev_restart_request_cmdid = WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
+       .vdev_up_cmdid = WMI_10_4_VDEV_UP_CMDID,
+       .vdev_stop_cmdid = WMI_10_4_VDEV_STOP_CMDID,
+       .vdev_down_cmdid = WMI_10_4_VDEV_DOWN_CMDID,
+       .vdev_set_param_cmdid = WMI_10_4_VDEV_SET_PARAM_CMDID,
+       .vdev_install_key_cmdid = WMI_10_4_VDEV_INSTALL_KEY_CMDID,
+       .peer_create_cmdid = WMI_10_4_PEER_CREATE_CMDID,
+       .peer_delete_cmdid = WMI_10_4_PEER_DELETE_CMDID,
+       .peer_flush_tids_cmdid = WMI_10_4_PEER_FLUSH_TIDS_CMDID,
+       .peer_set_param_cmdid = WMI_10_4_PEER_SET_PARAM_CMDID,
+       .peer_assoc_cmdid = WMI_10_4_PEER_ASSOC_CMDID,
+       .peer_add_wds_entry_cmdid = WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
+       .peer_remove_wds_entry_cmdid = WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
+       .peer_mcast_group_cmdid = WMI_10_4_PEER_MCAST_GROUP_CMDID,
+       .bcn_tx_cmdid = WMI_10_4_BCN_TX_CMDID,
+       .pdev_send_bcn_cmdid = WMI_10_4_PDEV_SEND_BCN_CMDID,
+       .bcn_tmpl_cmdid = WMI_10_4_BCN_PRB_TMPL_CMDID,
+       .bcn_filter_rx_cmdid = WMI_10_4_BCN_FILTER_RX_CMDID,
+       .prb_req_filter_rx_cmdid = WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
+       .mgmt_tx_cmdid = WMI_10_4_MGMT_TX_CMDID,
+       .prb_tmpl_cmdid = WMI_10_4_PRB_TMPL_CMDID,
+       .addba_clear_resp_cmdid = WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
+       .addba_send_cmdid = WMI_10_4_ADDBA_SEND_CMDID,
+       .addba_status_cmdid = WMI_10_4_ADDBA_STATUS_CMDID,
+       .delba_send_cmdid = WMI_10_4_DELBA_SEND_CMDID,
+       .addba_set_resp_cmdid = WMI_10_4_ADDBA_SET_RESP_CMDID,
+       .send_singleamsdu_cmdid = WMI_10_4_SEND_SINGLEAMSDU_CMDID,
+       .sta_powersave_mode_cmdid = WMI_10_4_STA_POWERSAVE_MODE_CMDID,
+       .sta_powersave_param_cmdid = WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
+       .sta_mimo_ps_mode_cmdid = WMI_10_4_STA_MIMO_PS_MODE_CMDID,
+       .pdev_dfs_enable_cmdid = WMI_10_4_PDEV_DFS_ENABLE_CMDID,
+       .pdev_dfs_disable_cmdid = WMI_10_4_PDEV_DFS_DISABLE_CMDID,
+       .roam_scan_mode = WMI_10_4_ROAM_SCAN_MODE,
+       .roam_scan_rssi_threshold = WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
+       .roam_scan_period = WMI_10_4_ROAM_SCAN_PERIOD,
+       .roam_scan_rssi_change_threshold =
+                               WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+       .roam_ap_profile = WMI_10_4_ROAM_AP_PROFILE,
+       .ofl_scan_add_ap_profile = WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
+       .ofl_scan_remove_ap_profile = WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
+       .ofl_scan_period = WMI_10_4_OFL_SCAN_PERIOD,
+       .p2p_dev_set_device_info = WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
+       .p2p_dev_set_discoverability = WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
+       .p2p_go_set_beacon_ie = WMI_10_4_P2P_GO_SET_BEACON_IE,
+       .p2p_go_set_probe_resp_ie = WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
+       .p2p_set_vendor_ie_data_cmdid = WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
+       .ap_ps_peer_param_cmdid = WMI_10_4_AP_PS_PEER_PARAM_CMDID,
+       .ap_ps_peer_uapsd_coex_cmdid = WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
+       .peer_rate_retry_sched_cmdid = WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
+       .wlan_profile_trigger_cmdid = WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
+       .wlan_profile_set_hist_intvl_cmdid =
+                               WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+       .wlan_profile_get_profile_data_cmdid =
+                               WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+       .wlan_profile_enable_profile_id_cmdid =
+                               WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+       .wlan_profile_list_profile_id_cmdid =
+                               WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+       .pdev_suspend_cmdid = WMI_10_4_PDEV_SUSPEND_CMDID,
+       .pdev_resume_cmdid = WMI_10_4_PDEV_RESUME_CMDID,
+       .add_bcn_filter_cmdid = WMI_10_4_ADD_BCN_FILTER_CMDID,
+       .rmv_bcn_filter_cmdid = WMI_10_4_RMV_BCN_FILTER_CMDID,
+       .wow_add_wake_pattern_cmdid = WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
+       .wow_del_wake_pattern_cmdid = WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
+       .wow_enable_disable_wake_event_cmdid =
+                               WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+       .wow_enable_cmdid = WMI_10_4_WOW_ENABLE_CMDID,
+       .wow_hostwakeup_from_sleep_cmdid =
+                               WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+       .rtt_measreq_cmdid = WMI_10_4_RTT_MEASREQ_CMDID,
+       .rtt_tsf_cmdid = WMI_10_4_RTT_TSF_CMDID,
+       .vdev_spectral_scan_configure_cmdid =
+                               WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+       .vdev_spectral_scan_enable_cmdid =
+                               WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+       .request_stats_cmdid = WMI_10_4_REQUEST_STATS_CMDID,
+       .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+       .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .gtk_offload_cmdid = WMI_10_4_GTK_OFFLOAD_CMDID,
+       .csa_offload_enable_cmdid = WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
+       .csa_offload_chanswitch_cmdid = WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
+       .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+       .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+       .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+       .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+       .echo_cmdid = WMI_10_4_ECHO_CMDID,
+       .pdev_utf_cmdid = WMI_10_4_PDEV_UTF_CMDID,
+       .dbglog_cfg_cmdid = WMI_10_4_DBGLOG_CFG_CMDID,
+       .pdev_qvit_cmdid = WMI_10_4_PDEV_QVIT_CMDID,
+       .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_set_keepalive_cmdid = WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
+       .vdev_get_keepalive_cmdid = WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
+       .force_fw_hang_cmdid = WMI_10_4_FORCE_FW_HANG_CMDID,
+       .gpio_config_cmdid = WMI_10_4_GPIO_CONFIG_CMDID,
+       .gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID,
+       .pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
+       .vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED,
+       .tdls_set_state_cmdid = WMI_CMD_UNSUPPORTED,
+       .tdls_peer_update_cmdid = WMI_CMD_UNSUPPORTED,
+       .adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED,
+       .scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
+       .vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
+       .vdev_resume_response_cmdid = WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
+       .wlan_peer_caching_add_peer_cmdid =
+                       WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
+       .wlan_peer_caching_evict_peer_cmdid =
+                       WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
+       .wlan_peer_caching_restore_peer_cmdid =
+                       WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
+       .wlan_peer_caching_print_all_peers_info_cmdid =
+                       WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
+       .peer_update_wds_entry_cmdid = WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
+       .peer_add_proxy_sta_entry_cmdid =
+                       WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+       .rtt_keepalive_cmdid = WMI_10_4_RTT_KEEPALIVE_CMDID,
+       .oem_req_cmdid = WMI_10_4_OEM_REQ_CMDID,
+       .nan_cmdid = WMI_10_4_NAN_CMDID,
+       .vdev_ratemask_cmdid = WMI_10_4_VDEV_RATEMASK_CMDID,
+       .qboost_cfg_cmdid = WMI_10_4_QBOOST_CFG_CMDID,
+       .pdev_smart_ant_enable_cmdid = WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
+       .pdev_smart_ant_set_rx_antenna_cmdid =
+                       WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+       .peer_smart_ant_set_tx_antenna_cmdid =
+                       WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+       .peer_smart_ant_set_train_info_cmdid =
+                       WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+       .peer_smart_ant_set_node_config_ops_cmdid =
+                       WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+       .pdev_set_antenna_switch_table_cmdid =
+                       WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+       .pdev_set_ctl_table_cmdid = WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
+       .pdev_set_mimogain_table_cmdid = WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+       .pdev_ratepwr_table_cmdid = WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
+       .pdev_ratepwr_chainmsk_table_cmdid =
+                       WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+       .pdev_fips_cmdid = WMI_10_4_PDEV_FIPS_CMDID,
+       .tt_set_conf_cmdid = WMI_10_4_TT_SET_CONF_CMDID,
+       .fwtest_cmdid = WMI_10_4_FWTEST_CMDID,
+       .vdev_atf_request_cmdid = WMI_10_4_VDEV_ATF_REQUEST_CMDID,
+       .peer_atf_request_cmdid = WMI_10_4_PEER_ATF_REQUEST_CMDID,
+       .pdev_get_ani_cck_config_cmdid = WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+       .pdev_get_ani_ofdm_config_cmdid =
+                       WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+       .pdev_reserve_ast_entry_cmdid = WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
+       .pdev_get_nfcal_power_cmdid = WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
+       .pdev_get_tpc_cmdid = WMI_10_4_PDEV_GET_TPC_CMDID,
+       .pdev_get_ast_info_cmdid = WMI_10_4_PDEV_GET_AST_INFO_CMDID,
+       .vdev_set_dscp_tid_map_cmdid = WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
+       .pdev_get_info_cmdid = WMI_10_4_PDEV_GET_INFO_CMDID,
+       .vdev_get_info_cmdid = WMI_10_4_VDEV_GET_INFO_CMDID,
+       .vdev_filter_neighbor_rx_packets_cmdid =
+                       WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+       .mu_cal_start_cmdid = WMI_10_4_MU_CAL_START_CMDID,
+       .set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
+       .pdev_bss_chan_info_request_cmdid =
+                       WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
 };
 
 /* MAIN WMI VDEV param map */
@@ -452,6 +761,22 @@ static struct wmi_vdev_param_map wmi_vdev_param_map = {
        .tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
        .ap_detect_out_of_sync_sleeping_sta_time_secs =
                                        WMI_VDEV_PARAM_UNSUPPORTED,
+       .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+       .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+       .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+       .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+       .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+       .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+       .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+       .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+       .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+       .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
 };
 
 /* 10.X WMI VDEV param map */
@@ -511,6 +836,22 @@ static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
        .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
        .ap_detect_out_of_sync_sleeping_sta_time_secs =
                WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+       .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+       .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+       .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+       .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+       .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+       .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+       .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+       .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+       .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+       .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
 };
 
 static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
@@ -569,6 +910,97 @@ static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
        .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
        .ap_detect_out_of_sync_sleeping_sta_time_secs =
                WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+       .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+       .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+       .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+       .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+       .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+       .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+       .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+       .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+       .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+       .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+};
+
+static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
+       .rts_threshold = WMI_10_4_VDEV_PARAM_RTS_THRESHOLD,
+       .fragmentation_threshold = WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+       .beacon_interval = WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
+       .listen_interval = WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
+       .multicast_rate = WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
+       .mgmt_tx_rate = WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
+       .slot_time = WMI_10_4_VDEV_PARAM_SLOT_TIME,
+       .preamble = WMI_10_4_VDEV_PARAM_PREAMBLE,
+       .swba_time = WMI_10_4_VDEV_PARAM_SWBA_TIME,
+       .wmi_vdev_stats_update_period = WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
+       .wmi_vdev_pwrsave_ageout_time = WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
+       .wmi_vdev_host_swba_interval = WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
+       .dtim_period = WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
+       .wmi_vdev_oc_scheduler_air_time_limit =
+              WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+       .wds = WMI_10_4_VDEV_PARAM_WDS,
+       .atim_window = WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
+       .bmiss_count_max = WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
+       .bmiss_first_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
+       .bmiss_final_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
+       .feature_wmm = WMI_10_4_VDEV_PARAM_FEATURE_WMM,
+       .chwidth = WMI_10_4_VDEV_PARAM_CHWIDTH,
+       .chextoffset = WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
+       .disable_htprotection = WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
+       .sta_quickkickout = WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
+       .mgmt_rate = WMI_10_4_VDEV_PARAM_MGMT_RATE,
+       .protection_mode = WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
+       .fixed_rate = WMI_10_4_VDEV_PARAM_FIXED_RATE,
+       .sgi = WMI_10_4_VDEV_PARAM_SGI,
+       .ldpc = WMI_10_4_VDEV_PARAM_LDPC,
+       .tx_stbc = WMI_10_4_VDEV_PARAM_TX_STBC,
+       .rx_stbc = WMI_10_4_VDEV_PARAM_RX_STBC,
+       .intra_bss_fwd = WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
+       .def_keyid = WMI_10_4_VDEV_PARAM_DEF_KEYID,
+       .nss = WMI_10_4_VDEV_PARAM_NSS,
+       .bcast_data_rate = WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
+       .mcast_data_rate = WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
+       .mcast_indicate = WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
+       .dhcp_indicate = WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
+       .unknown_dest_indicate = WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+       .ap_keepalive_min_idle_inactive_time_secs =
+              WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+       .ap_keepalive_max_idle_inactive_time_secs =
+              WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+       .ap_keepalive_max_unresponsive_time_secs =
+              WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+       .ap_enable_nawds = WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
+       .mcast2ucast_set = WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
+       .enable_rtscts = WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
+       .txbf = WMI_10_4_VDEV_PARAM_TXBF,
+       .packet_powersave = WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
+       .drop_unencry = WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
+       .tx_encap_type = WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
+       .ap_detect_out_of_sync_sleeping_sta_time_secs =
+              WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+       .rc_num_retries = WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
+       .cabq_maxdur = WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
+       .mfptest_set = WMI_10_4_VDEV_PARAM_MFPTEST_SET,
+       .rts_fixed_rate = WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
+       .vht_sgimask = WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
+       .vht80_ratemask = WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
+       .early_rx_adjust_enable = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+       .early_rx_tgt_bmiss_num = WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+       .early_rx_bmiss_sample_cycle =
+              WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+       .early_rx_slop_step = WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+       .early_rx_init_slop = WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+       .early_rx_adjust_pause = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+       .proxy_sta = WMI_10_4_VDEV_PARAM_PROXY_STA,
+       .meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
+       .rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
+       .bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
 };
 
 static struct wmi_pdev_param_map wmi_pdev_param_map = {
@@ -621,6 +1053,48 @@ static struct wmi_pdev_param_map wmi_pdev_param_map = {
        .burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
        .burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
        .cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
+       .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+       .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+       .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+       .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+       .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+       .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+       .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+       .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+       .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+       .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+       .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+       .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+       .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+       .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+       .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+       .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+       .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+       .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+       .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+       .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+       .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+       .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
 };
 
 static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
@@ -674,6 +1148,48 @@ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
        .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
        .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
        .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
+       .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+       .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+       .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+       .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+       .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+       .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+       .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+       .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+       .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+       .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+       .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+       .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+       .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+       .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+       .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+       .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+       .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+       .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+       .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+       .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+       .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+       .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
 };
 
 static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
@@ -727,6 +1243,48 @@ static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
        .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
        .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
        .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
+       .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+       .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+       .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+       .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+       .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+       .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+       .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+       .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+       .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+       .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+       .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+       .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+       .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+       .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+       .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+       .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+       .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+       .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+       .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+       .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+       .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+       .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
 };
 
 /* firmware 10.2 specific mappings */
@@ -849,6 +1407,139 @@ static struct wmi_cmd_map wmi_10_2_cmd_map = {
        .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
        .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
        .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+       .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+       .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+       .nan_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+       .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+       .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+       .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = {
+       .tx_chain_mask = WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK,
+       .rx_chain_mask = WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
+       .txpower_limit2g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
+       .txpower_limit5g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
+       .txpower_scale = WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
+       .beacon_gen_mode = WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
+       .beacon_tx_mode = WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
+       .resmgr_offchan_mode = WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+       .protection_mode = WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
+       .dynamic_bw = WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
+       .non_agg_sw_retry_th = WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+       .agg_sw_retry_th = WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
+       .sta_kickout_th = WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
+       .ac_aggrsize_scaling = WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+       .ltr_enable = WMI_10_4_PDEV_PARAM_LTR_ENABLE,
+       .ltr_ac_latency_be = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
+       .ltr_ac_latency_bk = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
+       .ltr_ac_latency_vi = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
+       .ltr_ac_latency_vo = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
+       .ltr_ac_latency_timeout = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+       .ltr_sleep_override = WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+       .ltr_rx_override = WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
+       .ltr_tx_activity_timeout = WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+       .l1ss_enable = WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
+       .dsleep_enable = WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
+       .pcielp_txbuf_flush = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+       .pcielp_txbuf_watermark = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+       .pcielp_txbuf_tmo_en = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+       .pcielp_txbuf_tmo_value = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+       .pdev_stats_update_period =
+                       WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+       .vdev_stats_update_period =
+                       WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+       .peer_stats_update_period =
+                       WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+       .bcnflt_stats_update_period =
+                       WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+       .pmf_qos = WMI_10_4_PDEV_PARAM_PMF_QOS,
+       .arp_ac_override = WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
+       .dcs = WMI_10_4_PDEV_PARAM_DCS,
+       .ani_enable = WMI_10_4_PDEV_PARAM_ANI_ENABLE,
+       .ani_poll_period = WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
+       .ani_listen_period = WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
+       .ani_ofdm_level = WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
+       .ani_cck_level = WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
+       .dyntxchain = WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
+       .proxy_sta = WMI_10_4_PDEV_PARAM_PROXY_STA,
+       .idle_ps_config = WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
+       .power_gating_sleep = WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
+       .fast_channel_reset = WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
+       .burst_dur = WMI_10_4_PDEV_PARAM_BURST_DUR,
+       .burst_enable = WMI_10_4_PDEV_PARAM_BURST_ENABLE,
+       .cal_period = WMI_10_4_PDEV_PARAM_CAL_PERIOD,
+       .aggr_burst = WMI_10_4_PDEV_PARAM_AGGR_BURST,
+       .rx_decap_mode = WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
+       .smart_antenna_default_antenna =
+                       WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+       .igmpmld_override = WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
+       .igmpmld_tid = WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
+       .antenna_gain = WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
+       .rx_filter = WMI_10_4_PDEV_PARAM_RX_FILTER,
+       .set_mcast_to_ucast_tid = WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
+       .proxy_sta_mode = WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
+       .set_mcast2ucast_mode = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+       .set_mcast2ucast_buffer = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+       .remove_mcast2ucast_buffer =
+                       WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+       .peer_sta_ps_statechg_enable =
+                       WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+       .igmpmld_ac_override = WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+       .block_interbss = WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
+       .set_disable_reset_cmdid = WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+       .set_msdu_ttl_cmdid = WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+       .set_ppdu_duration_cmdid = WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+       .txbf_sound_period_cmdid = WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+       .set_promisc_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+       .set_burst_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
+       .en_stats = WMI_10_4_PDEV_PARAM_EN_STATS,
+       .mu_group_policy = WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
+       .noise_detection = WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
+       .noise_threshold = WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
+       .dpd_enable = WMI_10_4_PDEV_PARAM_DPD_ENABLE,
+       .set_mcast_bcast_echo = WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+       .atf_strict_sch = WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
+       .atf_sched_duration = WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
+       .ant_plzn = WMI_10_4_PDEV_PARAM_ANT_PLZN,
+       .mgmt_retry_limit = WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
+       .sensitivity_level = WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
+       .signed_txpower_2g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
+       .signed_txpower_5g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
+       .enable_per_tid_amsdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+       .enable_per_tid_ampdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+       .cca_threshold = WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
+       .rts_fixed_rate = WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
+       .pdev_reset = WMI_10_4_PDEV_PARAM_PDEV_RESET,
+       .wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+       .arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
+       .arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
 };
 
 void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
@@ -1232,6 +1923,8 @@ ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
                        return "completed [preempted]";
                case WMI_SCAN_REASON_TIMEDOUT:
                        return "completed [timedout]";
+               case WMI_SCAN_REASON_INTERNAL_FAILURE:
+                       return "completed [internal err]";
                case WMI_SCAN_REASON_MAX:
                        break;
                }
@@ -1246,6 +1939,10 @@ ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
                return "preempted";
        case WMI_SCAN_EVENT_START_FAILED:
                return "start failed";
+       case WMI_SCAN_EVENT_RESTARTED:
+               return "restarted";
+       case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
+               return "foreign channel exit";
        default:
                return "unknown";
        }
@@ -1321,6 +2018,8 @@ int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
                break;
        case WMI_SCAN_EVENT_DEQUEUED:
        case WMI_SCAN_EVENT_PREEMPTED:
+       case WMI_SCAN_EVENT_RESTARTED:
+       case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
        default:
                break;
        }
@@ -1433,6 +2132,40 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
        return 0;
 }
 
+static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
+                                             struct sk_buff *skb,
+                                             struct wmi_mgmt_rx_ev_arg *arg)
+{
+       struct wmi_10_4_mgmt_rx_event *ev;
+       struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
+       size_t pull_len;
+       u32 msdu_len;
+
+       ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
+       ev_hdr = &ev->hdr;
+       pull_len = sizeof(*ev);
+
+       if (skb->len < pull_len)
+               return -EPROTO;
+
+       skb_pull(skb, pull_len);
+       arg->channel = ev_hdr->channel;
+       arg->buf_len = ev_hdr->buf_len;
+       arg->status = ev_hdr->status;
+       arg->snr = ev_hdr->snr;
+       arg->phy_mode = ev_hdr->phy_mode;
+       arg->rate = ev_hdr->rate;
+
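+       /* Reject events whose reported payload length exceeds the bytes
+        * actually received, before trimming off any padding.
+        */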
+       msdu_len = __le32_to_cpu(arg->buf_len);
+       if (skb->len < msdu_len)
+               return -EPROTO;
+
+       /* Make sure bytes added for padding are removed. */
+       skb_trim(skb, msdu_len);
+
+       return 0;
+}
+
 int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
 {
        struct wmi_mgmt_rx_ev_arg arg = {};
@@ -1593,6 +2326,29 @@ static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
        return 0;
 }
 
+static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar,
+                                             struct sk_buff *skb,
+                                             struct wmi_ch_info_ev_arg *arg)
+{
+       struct wmi_10_4_chan_info_event *ev = (void *)skb->data;
+
+       if (skb->len < sizeof(*ev))
+               return -EPROTO;
+
+       skb_pull(skb, sizeof(*ev));
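+       /* Fields are copied in wire (little-endian) order; consumers
+        * convert with __le32_to_cpu() as needed.
+        */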
+       arg->err_code = ev->err_code;
+       arg->freq = ev->freq;
+       arg->cmd_flags = ev->cmd_flags;
+       arg->noise_floor = ev->noise_floor;
+       arg->rx_clear_count = ev->rx_clear_count;
+       arg->cycle_count = ev->cycle_count;
+       arg->chan_tx_pwr_range = ev->chan_tx_pwr_range;
+       arg->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
+       arg->rx_frame_count = ev->rx_frame_count;
+
+       return 0;
+}
+
 void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
 {
        struct wmi_ch_info_ev_arg arg = {};
@@ -2149,33 +2905,42 @@ exit:
 static void ath10k_wmi_update_tim(struct ath10k *ar,
                                  struct ath10k_vif *arvif,
                                  struct sk_buff *bcn,
-                                 const struct wmi_tim_info *tim_info)
+                                 const struct wmi_tim_info_arg *tim_info)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
        struct ieee80211_tim_ie *tim;
        u8 *ies, *ie;
        u8 ie_len, pvm_len;
        __le32 t;
-       u32 v;
+       u32 v, tim_len;
+
+       /* When FW reports 0 in tim_len, ensure at least the first byte
+        * in tim_bitmap is considered for pvm calculation.
+        */
+       tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1;
 
        /* if the next SWBA has no tim_changed, the tim_bitmap is garbage;
         * we must copy the bitmap upon change and reuse it later */
        if (__le32_to_cpu(tim_info->tim_changed)) {
                int i;
 
-               BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
-                            sizeof(tim_info->tim_bitmap));
+               if (sizeof(arvif->u.ap.tim_bitmap) < tim_len) {
+                       ath10k_warn(ar, "SWBA TIM field is too big (%u), truncated it to %zu",
+                                   tim_len, sizeof(arvif->u.ap.tim_bitmap));
+                       tim_len = sizeof(arvif->u.ap.tim_bitmap);
+               }
 
-               for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
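+               /* tim_bitmap is packed as __le32 words; unpack byte i
+                * from bits (i % 4) * 8 of word i / 4.
+                */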
+               for (i = 0; i < tim_len; i++) {
                        t = tim_info->tim_bitmap[i / 4];
                        v = __le32_to_cpu(t);
                        arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
                }
 
-               /* FW reports either length 0 or 16
-                * so we calculate this on our own */
+               /* FW reports either length 0 or a length based on the max
+                * supported stations, so we calculate this on our own
+                */
                arvif->u.ap.tim_len = 0;
-               for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
+               for (i = 0; i < tim_len; i++)
                        if (arvif->u.ap.tim_bitmap[i])
                                arvif->u.ap.tim_len = i;
 
@@ -2199,7 +2964,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
        pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
 
        if (pvm_len < arvif->u.ap.tim_len) {
-               int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
+               int expand_size = tim_len - pvm_len;
                int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
                void *next_ie = ie + 2 + ie_len;
 
@@ -2214,7 +2979,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
                }
        }
 
-       if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
+       if (pvm_len > tim_len) {
                ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
                return;
        }
@@ -2278,7 +3043,21 @@ static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
                if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
                        break;
 
-               arg->tim_info[i] = &ev->bcn_info[i].tim_info;
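+               /* Reject events whose reported TIM length exceeds the
+                * bitmap storage present in the event structure.
+                */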
+               if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+                    sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+                       ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+                       return -EPROTO;
+               }
+
+               arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
+               arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+               arg->tim_info[i].tim_bitmap =
+                               ev->bcn_info[i].tim_info.tim_bitmap;
+               arg->tim_info[i].tim_changed =
+                               ev->bcn_info[i].tim_info.tim_changed;
+               arg->tim_info[i].tim_num_ps_pending =
+                               ev->bcn_info[i].tim_info.tim_num_ps_pending;
+
                arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
                i++;
        }
@@ -2286,12 +3065,74 @@ static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
        return 0;
 }
 
+static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar,
+                                          struct sk_buff *skb,
+                                          struct wmi_swba_ev_arg *arg)
+{
+       struct wmi_10_4_host_swba_event *ev = (void *)skb->data;
+       u32 map, tim_len;
+       size_t i;
+
+       if (skb->len < sizeof(*ev))
+               return -EPROTO;
+
+       skb_pull(skb, sizeof(*ev));
+       arg->vdev_map = ev->vdev_map;
+
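+       /* Each set bit in vdev_map marks a vdev due for a beacon; i
+        * indexes the corresponding bcn_info entry.
+        */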
+       for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
+               if (!(map & BIT(0)))
+                       continue;
+
+               /* If this triggers, the firmware has changed and ath10k
+                * should update the max size of the tim_info array.
+                */
+               if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
+                       break;
+
+               if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+                     sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+                       ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+                       return -EPROTO;
+               }
+
+               tim_len = __le32_to_cpu(ev->bcn_info[i].tim_info.tim_len);
+               if (tim_len) {
+                       /* Exclude 4 byte guard length */
+                       tim_len -= 4;
+                       arg->tim_info[i].tim_len = __cpu_to_le32(tim_len);
+               } else {
+                       arg->tim_info[i].tim_len = 0;
+               }
+
+               arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+               arg->tim_info[i].tim_bitmap =
+                               ev->bcn_info[i].tim_info.tim_bitmap;
+               arg->tim_info[i].tim_changed =
+                               ev->bcn_info[i].tim_info.tim_changed;
+               arg->tim_info[i].tim_num_ps_pending =
+                               ev->bcn_info[i].tim_info.tim_num_ps_pending;
+
+               /* 10.4 firmware doesn't have p2p support, so the notice of
+                * absence info can be ignored for now.
+                */
+
+               i++;
+       }
+
+       return 0;
+}
+
+static enum wmi_txbf_conf ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k *ar)
+{
+       return WMI_TXBF_CONF_BEFORE_ASSOC;
+}
+
 void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
 {
        struct wmi_swba_ev_arg arg = {};
        u32 map;
        int i = -1;
-       const struct wmi_tim_info *tim_info;
+       const struct wmi_tim_info_arg *tim_info;
        const struct wmi_p2p_noa_info *noa_info;
        struct ath10k_vif *arvif;
        struct sk_buff *bcn;
@@ -2320,7 +3161,7 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
                        break;
                }
 
-               tim_info = arg.tim_info[i];
+               tim_info = &arg.tim_info[i];
                noa_info = arg.noa_info[i];
 
                ath10k_dbg(ar, ATH10K_DBG_MGMT,
@@ -2335,6 +3176,10 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
                           __le32_to_cpu(tim_info->tim_bitmap[1]),
                           __le32_to_cpu(tim_info->tim_bitmap[0]));
 
+               /* TODO: Only the first 4 words of tim_bitmap are dumped.
+                * Extend the debug code to dump the full tim_bitmap.
+                */
+
                arvif = ath10k_get_arvif(ar, vdev_id);
                if (arvif == NULL) {
                        ath10k_warn(ar, "no vif for vdev_id %d found\n",
@@ -2658,7 +3503,7 @@ void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
                                                          fftr, fftr_len,
                                                          tsf);
                        if (res < 0) {
-                               ath10k_warn(ar, "failed to process fft report: %d\n",
+                               ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n",
                                            res);
                                return;
                        }
@@ -2949,7 +3794,7 @@ static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
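+       /* Service-ready processing now runs from a worker in process
+        * context, so this allocation is allowed to sleep.
+        */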
        ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
                                                           pool_size,
                                                           &paddr,
-                                                          GFP_ATOMIC);
+                                                          GFP_KERNEL);
        if (!ar->wmi.mem_chunks[idx].vaddr) {
                ath10k_warn(ar, "failed to allocate memory chunk\n");
                return -ENOMEM;
@@ -3038,12 +3883,19 @@ ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
        return 0;
 }
 
-void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
+static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
 {
+       struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work);
+       struct sk_buff *skb = ar->svc_rdy_skb;
        struct wmi_svc_rdy_ev_arg arg = {};
        u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
        int ret;
 
+       if (!skb) {
+               ath10k_warn(ar, "invalid service ready event skb\n");
+               return;
+       }
+
        ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
        if (ret) {
                ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
@@ -3075,10 +3927,10 @@ void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
        if (ar->fw_api == 1 && ar->fw_version_build > 636)
                set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
 
-       if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
+       if (ar->num_rf_chains > ar->max_spatial_stream) {
                ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
-                           ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
-               ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
+                           ar->num_rf_chains, ar->max_spatial_stream);
+               ar->num_rf_chains = ar->max_spatial_stream;
        }
 
        ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1;
@@ -3101,20 +3953,39 @@ void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
                return;
        }
 
+       if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
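+               /* Peer-caching firmware sizes peers from the qcache maxima,
+                * so derive the host peer/TID limits from those values.
+                */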
+               ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
+                                   TARGET_10_4_NUM_VDEVS;
+               ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
+                                      TARGET_10_4_NUM_VDEVS;
+               ar->num_tids = ar->num_active_peers * 2;
+               ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
+       }
+
+       /* TODO: Adjust max peer count for cases like WMI_SERVICE_RATECTRL_CACHE
+        * and WMI_SERVICE_IRAM_TIDS, etc.
+        */
+
        for (i = 0; i < num_mem_reqs; ++i) {
                req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
                num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
                unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
                num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
 
-               if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
+               if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
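+                       /* Prefer the active-peer count when peer caching
+                        * sized the host limits; the +1 is the target's
+                        * self peer, matching NUM_UNITS_IS_NUM_PEERS below.
+                        */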
+                       if (ar->num_active_peers)
+                               num_units = ar->num_active_peers + 1;
+                       else
+                               num_units = ar->max_num_peers + 1;
+               } else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
                        /* number of units to allocate is the number of
                         * peers, plus 1 extra for the self peer on target */
                        /* this count must stay consistent between host and
                         * target, otherwise they can get out of sync */
-                       num_units = TARGET_10X_NUM_PEERS + 1;
-               else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
-                       num_units = TARGET_10X_NUM_VDEVS + 1;
+                       num_units = ar->max_num_peers + 1;
+               } else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
+                       num_units = ar->max_num_vdevs + 1;
+               }
 
                ath10k_dbg(ar, ATH10K_DBG_WMI,
                           "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
@@ -3144,9 +4015,17 @@ void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
                   __le32_to_cpu(arg.eeprom_rd),
                   __le32_to_cpu(arg.num_mem_reqs));
 
+       dev_kfree_skb(skb);
+       ar->svc_rdy_skb = NULL;
        complete(&ar->wmi.service_ready);
 }
 
+void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
+{
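+       /* Defer parsing to the aux workqueue; the handler may sleep
+        * (e.g. for memory chunk allocation), which is not safe from
+        * the WMI rx path.
+        */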
+       ar->svc_rdy_skb = skb;
+       queue_work(ar->workqueue_aux, &ar->svc_rdy_work);
+}
+
 static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
                                     struct wmi_rdy_ev_arg *arg)
 {
@@ -3318,7 +4197,7 @@ static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
                break;
        case WMI_SERVICE_READY_EVENTID:
                ath10k_wmi_event_service_ready(ar, skb);
-               break;
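+               /* skb ownership passes to the service-ready worker, which
+                * frees it after parsing, hence return instead of break.
+                */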
+               return;
        case WMI_READY_EVENTID:
                ath10k_wmi_event_ready(ar, skb);
                break;
@@ -3439,7 +4318,7 @@ static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
                break;
        case WMI_10X_SERVICE_READY_EVENTID:
                ath10k_wmi_event_service_ready(ar, skb);
-               break;
+               return;
        case WMI_10X_READY_EVENTID:
                ath10k_wmi_event_ready(ar, skb);
                break;
@@ -3550,7 +4429,7 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
                break;
        case WMI_10_2_SERVICE_READY_EVENTID:
                ath10k_wmi_event_service_ready(ar, skb);
-               break;
+               return;
        case WMI_10_2_READY_EVENTID:
                ath10k_wmi_event_ready(ar, skb);
                break;
@@ -3576,6 +4455,73 @@ out:
        dev_kfree_skb(skb);
 }
 
+static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct wmi_cmd_hdr *cmd_hdr;
+       enum wmi_10_4_event_id id;
+
+       cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+       id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+       if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
+               goto out;
+
+       trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+       switch (id) {
+       case WMI_10_4_MGMT_RX_EVENTID:
+               ath10k_wmi_event_mgmt_rx(ar, skb);
+               /* mgmt_rx() owns the skb now! */
+               return;
+       case WMI_10_4_ECHO_EVENTID:
+               ath10k_wmi_event_echo(ar, skb);
+               break;
+       case WMI_10_4_DEBUG_MESG_EVENTID:
+               ath10k_wmi_event_debug_mesg(ar, skb);
+               break;
+       case WMI_10_4_SERVICE_READY_EVENTID:
+               ath10k_wmi_event_service_ready(ar, skb);
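+               /* The skb is consumed and freed by the service-ready
+                * worker; skip the common dev_kfree_skb() below.
+                */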
+               return;
+       case WMI_10_4_SCAN_EVENTID:
+               ath10k_wmi_event_scan(ar, skb);
+               break;
+       case WMI_10_4_CHAN_INFO_EVENTID:
+               ath10k_wmi_event_chan_info(ar, skb);
+               break;
+       case WMI_10_4_READY_EVENTID:
+               ath10k_wmi_event_ready(ar, skb);
+               break;
+       case WMI_10_4_PEER_STA_KICKOUT_EVENTID:
+               ath10k_wmi_event_peer_sta_kickout(ar, skb);
+               break;
+       case WMI_10_4_HOST_SWBA_EVENTID:
+               ath10k_wmi_event_host_swba(ar, skb);
+               break;
+       case WMI_10_4_TBTTOFFSET_UPDATE_EVENTID:
+               ath10k_wmi_event_tbttoffset_update(ar, skb);
+               break;
+       case WMI_10_4_DEBUG_PRINT_EVENTID:
+               ath10k_wmi_event_debug_print(ar, skb);
+               break;
+       case WMI_10_4_VDEV_START_RESP_EVENTID:
+               ath10k_wmi_event_vdev_start_resp(ar, skb);
+               break;
+       case WMI_10_4_VDEV_STOPPED_EVENTID:
+               ath10k_wmi_event_vdev_stopped(ar, skb);
+               break;
+       case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
+               ath10k_dbg(ar, ATH10K_DBG_WMI,
+                          "received event id %d not implemented\n", id);
+               break;
+       default:
+               ath10k_warn(ar, "Unknown eventid: %d\n", id);
+               break;
+       }
+
+out:
+       dev_kfree_skb(skb);
+}
+
 static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
 {
        int ret;
@@ -3762,8 +4708,7 @@ static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
        config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
        config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
        config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
-       config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);
-
+       config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
        config.scan_max_pending_reqs =
                __cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
 
@@ -3831,8 +4776,7 @@ static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
        config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
        config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
        config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
-       config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
-
+       config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
        config.scan_max_pending_reqs =
                __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
 
@@ -3897,7 +4841,7 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
        config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
        config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
        config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
-       config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
+       config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
 
        config.scan_max_pending_reqs =
                __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
@@ -3950,6 +4894,88 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
        return buf;
 }
 
+static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
+{
+       struct wmi_init_cmd_10_4 *cmd;
+       struct sk_buff *buf;
+       struct wmi_resource_config_10_4 config = {};
+       u32 len;
+
+       config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs);
+       config.num_peers = __cpu_to_le32(ar->max_num_peers);
+       config.num_active_peers = __cpu_to_le32(ar->num_active_peers);
+       config.num_tids = __cpu_to_le32(ar->num_tids);
+
+       config.num_offload_peers = __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_PEERS);
+       config.num_offload_reorder_buffs =
+                       __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS);
+       config.num_peer_keys  = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS);
+       config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT);
+       config.tx_chain_mask  = __cpu_to_le32(TARGET_10_4_TX_CHAIN_MASK);
+       config.rx_chain_mask  = __cpu_to_le32(TARGET_10_4_RX_CHAIN_MASK);
+
+       config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+       config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+       config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+       config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI);
+
+       config.rx_decap_mode        = __cpu_to_le32(TARGET_10_4_RX_DECAP_MODE);
+       config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS);
+       config.bmiss_offload_max_vdev =
+                       __cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV);
+       config.roam_offload_max_vdev  =
+                       __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV);
+       config.roam_offload_max_ap_profiles =
+                       __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES);
+       config.num_mcast_groups = __cpu_to_le32(TARGET_10_4_NUM_MCAST_GROUPS);
+       config.num_mcast_table_elems =
+                       __cpu_to_le32(TARGET_10_4_NUM_MCAST_TABLE_ELEMS);
+
+       config.mcast2ucast_mode = __cpu_to_le32(TARGET_10_4_MCAST2UCAST_MODE);
+       config.tx_dbg_log_size  = __cpu_to_le32(TARGET_10_4_TX_DBG_LOG_SIZE);
+       config.num_wds_entries  = __cpu_to_le32(TARGET_10_4_NUM_WDS_ENTRIES);
+       config.dma_burst_size   = __cpu_to_le32(TARGET_10_4_DMA_BURST_SIZE);
+       config.mac_aggr_delim   = __cpu_to_le32(TARGET_10_4_MAC_AGGR_DELIM);
+
+       config.rx_skip_defrag_timeout_dup_detection_check =
+         __cpu_to_le32(TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK);
+
+       config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG);
+       config.gtk_offload_max_vdev =
+                       __cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV);
+       config.num_msdu_desc = __cpu_to_le32(TARGET_10_4_NUM_MSDU_DESC);
+       config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS);
+       config.max_peer_ext_stats =
+                       __cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS);
+       config.smart_ant_cap = __cpu_to_le32(TARGET_10_4_SMART_ANT_CAP);
+
+       config.bk_minfree = __cpu_to_le32(TARGET_10_4_BK_MIN_FREE);
+       config.be_minfree = __cpu_to_le32(TARGET_10_4_BE_MIN_FREE);
+       config.vi_minfree = __cpu_to_le32(TARGET_10_4_VI_MIN_FREE);
+       config.vo_minfree = __cpu_to_le32(TARGET_10_4_VO_MIN_FREE);
+
+       config.rx_batchmode = __cpu_to_le32(TARGET_10_4_RX_BATCH_MODE);
+       config.tt_support =
+                       __cpu_to_le32(TARGET_10_4_THERMAL_THROTTLING_CONFIG);
+       config.atf_config = __cpu_to_le32(TARGET_10_4_ATF_CONFIG);
+       config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG);
+       config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG);
+
+       len = sizeof(*cmd) +
+             (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+       buf = ath10k_wmi_alloc_skb(ar, len);
+       if (!buf)
+               return ERR_PTR(-ENOMEM);
+
+       cmd = (struct wmi_init_cmd_10_4 *)buf->data;
+       memcpy(&cmd->resource_config, &config, sizeof(config));
+       ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.4\n");
+       return buf;
+}
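
A two-line illustration of the convention used throughout this function: WMI fields are little-endian on the wire, so every host-native value is wrapped in __cpu_to_le32() before being copied into the firmware-bound buffer:

	__le32 wire_val = __cpu_to_le32(ar->max_num_vdevs); /* LE on the wire */
	u32 host_val    = __le32_to_cpu(wire_val);          /* back to host order */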
+
 int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
 {
        if (arg->ie_len && !arg->ie)
@@ -4172,7 +5198,6 @@ void ath10k_wmi_start_scan_init(struct ath10k *ar,
                | WMI_SCAN_EVENT_BSS_CHANNEL
                | WMI_SCAN_EVENT_FOREIGN_CHANNEL
                | WMI_SCAN_EVENT_DEQUEUED;
-       arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
        arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
        arg->n_bssids = 1;
        arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
@@ -5412,9 +6437,65 @@ static const struct wmi_ops wmi_10_2_4_ops = {
        /* .gen_adaptive_qcs not implemented */
 };
 
+static const struct wmi_ops wmi_10_4_ops = {
+       .rx = ath10k_wmi_10_4_op_rx,
+       .map_svc = wmi_10_4_svc_map,
+
+       .pull_scan = ath10k_wmi_op_pull_scan_ev,
+       .pull_mgmt_rx = ath10k_wmi_10_4_op_pull_mgmt_rx_ev,
+       .pull_ch_info = ath10k_wmi_10_4_op_pull_ch_info_ev,
+       .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+       .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+       .pull_swba = ath10k_wmi_10_4_op_pull_swba_ev,
+       .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
+       .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+       .get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme,
+
+       .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+       .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+       .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+       .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+       .gen_init = ath10k_wmi_10_4_op_gen_init,
+       .gen_start_scan = ath10k_wmi_op_gen_start_scan,
+       .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+       .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+       .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+       .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+       .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+       .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+       .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+       .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+       .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+       .gen_peer_create = ath10k_wmi_op_gen_peer_create,
+       .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+       .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+       .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+       .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+       .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+       .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+       .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+       .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+       .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+       .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+       .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+       .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+       .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+       .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+       .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+
+       /* shared with 10.2 */
+       .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
+};
+
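
Ops deliberately left out of this table remain NULL function pointers; the generic ath10k_wmi_*() wrappers are expected to check the pointer and fail gracefully, roughly along these lines (a sketch of the pattern, not code from this patch):

	static inline int sketch_wmi_call(struct ath10k *ar)
	{
		if (!ar->wmi.ops->gen_adaptive_qcs)	/* unset in wmi_10_4_ops */
			return -EOPNOTSUPP;
		/* ...generate the skb via the op and submit it... */
		return 0;
	}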
 int ath10k_wmi_attach(struct ath10k *ar)
 {
        switch (ar->wmi.op_version) {
+       case ATH10K_FW_WMI_OP_VERSION_10_4:
+               ar->wmi.ops = &wmi_10_4_ops;
+               ar->wmi.cmd = &wmi_10_4_cmd_map;
+               ar->wmi.vdev_param = &wmi_10_4_vdev_param_map;
+               ar->wmi.pdev_param = &wmi_10_4_pdev_param_map;
+               break;
        case ATH10K_FW_WMI_OP_VERSION_10_2_4:
                ar->wmi.cmd = &wmi_10_2_4_cmd_map;
                ar->wmi.ops = &wmi_10_2_4_ops;
@@ -5452,6 +6533,8 @@ int ath10k_wmi_attach(struct ath10k *ar)
        init_completion(&ar->wmi.service_ready);
        init_completion(&ar->wmi.unified_ready);
 
+       INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
+
        return 0;
 }
 
@@ -5459,6 +6542,11 @@ void ath10k_wmi_detach(struct ath10k *ar)
 {
        int i;
 
+       cancel_work_sync(&ar->svc_rdy_work);
+
+       if (ar->svc_rdy_skb)
+               dev_kfree_skb(ar->svc_rdy_skb);
+
        /* free the host memory chunks requested by firmware */
        for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
                dma_free_coherent(ar->dev,
index cf44a3d080a38c7a58b454d7e82419e98f1733a4..232500a5d7bd9e41da6a6380e22908873a92850b 100644
@@ -150,6 +150,12 @@ enum wmi_service {
        WMI_SERVICE_SAP_AUTH_OFFLOAD,
        WMI_SERVICE_ATF,
        WMI_SERVICE_COEX_GPIO,
+       WMI_SERVICE_ENHANCED_PROXY_STA,
+       WMI_SERVICE_TT,
+       WMI_SERVICE_PEER_CACHING,
+       WMI_SERVICE_AUX_SPECTRAL_INTF,
+       WMI_SERVICE_AUX_CHAN_LOAD_INTF,
+       WMI_SERVICE_BSS_CHANNEL_INFO_64,
 
        /* keep last */
        WMI_SERVICE_MAX,
@@ -218,6 +224,51 @@ enum wmi_main_service {
        WMI_MAIN_SERVICE_TX_ENCAP,
 };
 
+enum wmi_10_4_service {
+       WMI_10_4_SERVICE_BEACON_OFFLOAD = 0,
+       WMI_10_4_SERVICE_SCAN_OFFLOAD,
+       WMI_10_4_SERVICE_ROAM_OFFLOAD,
+       WMI_10_4_SERVICE_BCN_MISS_OFFLOAD,
+       WMI_10_4_SERVICE_STA_PWRSAVE,
+       WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE,
+       WMI_10_4_SERVICE_AP_UAPSD,
+       WMI_10_4_SERVICE_AP_DFS,
+       WMI_10_4_SERVICE_11AC,
+       WMI_10_4_SERVICE_BLOCKACK,
+       WMI_10_4_SERVICE_PHYERR,
+       WMI_10_4_SERVICE_BCN_FILTER,
+       WMI_10_4_SERVICE_RTT,
+       WMI_10_4_SERVICE_RATECTRL,
+       WMI_10_4_SERVICE_WOW,
+       WMI_10_4_SERVICE_RATECTRL_CACHE,
+       WMI_10_4_SERVICE_IRAM_TIDS,
+       WMI_10_4_SERVICE_BURST,
+       WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+       WMI_10_4_SERVICE_GTK_OFFLOAD,
+       WMI_10_4_SERVICE_SCAN_SCH,
+       WMI_10_4_SERVICE_CSA_OFFLOAD,
+       WMI_10_4_SERVICE_CHATTER,
+       WMI_10_4_SERVICE_COEX_FREQAVOID,
+       WMI_10_4_SERVICE_PACKET_POWER_SAVE,
+       WMI_10_4_SERVICE_FORCE_FW_HANG,
+       WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+       WMI_10_4_SERVICE_GPIO,
+       WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+       WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+       WMI_10_4_SERVICE_STA_KEEP_ALIVE,
+       WMI_10_4_SERVICE_TX_ENCAP,
+       WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+       WMI_10_4_SERVICE_EARLY_RX,
+       WMI_10_4_SERVICE_ENHANCED_PROXY_STA,
+       WMI_10_4_SERVICE_TT,
+       WMI_10_4_SERVICE_ATF,
+       WMI_10_4_SERVICE_PEER_CACHING,
+       WMI_10_4_SERVICE_COEX_GPIO,
+       WMI_10_4_SERVICE_AUX_SPECTRAL_INTF,
+       WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF,
+       WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
+};
+
 static inline char *wmi_service_name(int service_id)
 {
 #define SVCSTR(x) case x: return #x
@@ -299,6 +350,12 @@ static inline char *wmi_service_name(int service_id)
        SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD);
        SVCSTR(WMI_SERVICE_ATF);
        SVCSTR(WMI_SERVICE_COEX_GPIO);
+       SVCSTR(WMI_SERVICE_ENHANCED_PROXY_STA);
+       SVCSTR(WMI_SERVICE_TT);
+       SVCSTR(WMI_SERVICE_PEER_CACHING);
+       SVCSTR(WMI_SERVICE_AUX_SPECTRAL_INTF);
+       SVCSTR(WMI_SERVICE_AUX_CHAN_LOAD_INTF);
+       SVCSTR(WMI_SERVICE_BSS_CHANNEL_INFO_64);
        default:
                return NULL;
        }
@@ -437,6 +494,95 @@ static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
               WMI_SERVICE_TX_ENCAP, len);
 }
 
+static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
+                                   size_t len)
+{
+       SVCMAP(WMI_10_4_SERVICE_BEACON_OFFLOAD,
+              WMI_SERVICE_BEACON_OFFLOAD, len);
+       SVCMAP(WMI_10_4_SERVICE_SCAN_OFFLOAD,
+              WMI_SERVICE_SCAN_OFFLOAD, len);
+       SVCMAP(WMI_10_4_SERVICE_ROAM_OFFLOAD,
+              WMI_SERVICE_ROAM_OFFLOAD, len);
+       SVCMAP(WMI_10_4_SERVICE_BCN_MISS_OFFLOAD,
+              WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+       SVCMAP(WMI_10_4_SERVICE_STA_PWRSAVE,
+              WMI_SERVICE_STA_PWRSAVE, len);
+       SVCMAP(WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE,
+              WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+       SVCMAP(WMI_10_4_SERVICE_AP_UAPSD,
+              WMI_SERVICE_AP_UAPSD, len);
+       SVCMAP(WMI_10_4_SERVICE_AP_DFS,
+              WMI_SERVICE_AP_DFS, len);
+       SVCMAP(WMI_10_4_SERVICE_11AC,
+              WMI_SERVICE_11AC, len);
+       SVCMAP(WMI_10_4_SERVICE_BLOCKACK,
+              WMI_SERVICE_BLOCKACK, len);
+       SVCMAP(WMI_10_4_SERVICE_PHYERR,
+              WMI_SERVICE_PHYERR, len);
+       SVCMAP(WMI_10_4_SERVICE_BCN_FILTER,
+              WMI_SERVICE_BCN_FILTER, len);
+       SVCMAP(WMI_10_4_SERVICE_RTT,
+              WMI_SERVICE_RTT, len);
+       SVCMAP(WMI_10_4_SERVICE_RATECTRL,
+              WMI_SERVICE_RATECTRL, len);
+       SVCMAP(WMI_10_4_SERVICE_WOW,
+              WMI_SERVICE_WOW, len);
+       SVCMAP(WMI_10_4_SERVICE_RATECTRL_CACHE,
+              WMI_SERVICE_RATECTRL_CACHE, len);
+       SVCMAP(WMI_10_4_SERVICE_IRAM_TIDS,
+              WMI_SERVICE_IRAM_TIDS, len);
+       SVCMAP(WMI_10_4_SERVICE_BURST,
+              WMI_SERVICE_BURST, len);
+       SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+              WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len);
+       SVCMAP(WMI_10_4_SERVICE_GTK_OFFLOAD,
+              WMI_SERVICE_GTK_OFFLOAD, len);
+       SVCMAP(WMI_10_4_SERVICE_SCAN_SCH,
+              WMI_SERVICE_SCAN_SCH, len);
+       SVCMAP(WMI_10_4_SERVICE_CSA_OFFLOAD,
+              WMI_SERVICE_CSA_OFFLOAD, len);
+       SVCMAP(WMI_10_4_SERVICE_CHATTER,
+              WMI_SERVICE_CHATTER, len);
+       SVCMAP(WMI_10_4_SERVICE_COEX_FREQAVOID,
+              WMI_SERVICE_COEX_FREQAVOID, len);
+       SVCMAP(WMI_10_4_SERVICE_PACKET_POWER_SAVE,
+              WMI_SERVICE_PACKET_POWER_SAVE, len);
+       SVCMAP(WMI_10_4_SERVICE_FORCE_FW_HANG,
+              WMI_SERVICE_FORCE_FW_HANG, len);
+       SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+              WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
+       SVCMAP(WMI_10_4_SERVICE_GPIO,
+              WMI_SERVICE_GPIO, len);
+       SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+              WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
+       SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+              WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
+       SVCMAP(WMI_10_4_SERVICE_STA_KEEP_ALIVE,
+              WMI_SERVICE_STA_KEEP_ALIVE, len);
+       SVCMAP(WMI_10_4_SERVICE_TX_ENCAP,
+              WMI_SERVICE_TX_ENCAP, len);
+       SVCMAP(WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+              WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, len);
+       SVCMAP(WMI_10_4_SERVICE_EARLY_RX,
+              WMI_SERVICE_EARLY_RX, len);
+       SVCMAP(WMI_10_4_SERVICE_ENHANCED_PROXY_STA,
+              WMI_SERVICE_ENHANCED_PROXY_STA, len);
+       SVCMAP(WMI_10_4_SERVICE_TT,
+              WMI_SERVICE_TT, len);
+       SVCMAP(WMI_10_4_SERVICE_ATF,
+              WMI_SERVICE_ATF, len);
+       SVCMAP(WMI_10_4_SERVICE_PEER_CACHING,
+              WMI_SERVICE_PEER_CACHING, len);
+       SVCMAP(WMI_10_4_SERVICE_COEX_GPIO,
+              WMI_SERVICE_COEX_GPIO, len);
+       SVCMAP(WMI_10_4_SERVICE_AUX_SPECTRAL_INTF,
+              WMI_SERVICE_AUX_SPECTRAL_INTF, len);
+       SVCMAP(WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF,
+              WMI_SERVICE_AUX_CHAN_LOAD_INTF, len);
+       SVCMAP(WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
+              WMI_SERVICE_BSS_CHANNEL_INFO_64, len);
+}
+
 #undef SVCMAP
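
wmi_10_4_svc_map() above leans entirely on the SVCMAP() helper defined earlier in this header; a plausible expansion is sketched below purely to show the mechanism (test a bit in the firmware-order bitmap, set the matching abstract service bit):

	#define SVCMAP_SKETCH(in_bit, out_bit, len)		\
		do {						\
			if ((in_bit) < (len) * 32 &&		\
			    __le32_to_cpu(in[(in_bit) / 32]) &	\
			    BIT((in_bit) % 32))			\
				__set_bit(out_bit, out);	\
		} while (0)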
 
 /* 2 word representation of MAC addr */
@@ -565,6 +711,48 @@ struct wmi_cmd_map {
        u32 tdls_set_state_cmdid;
        u32 tdls_peer_update_cmdid;
        u32 adaptive_qcs_cmdid;
+       u32 scan_update_request_cmdid;
+       u32 vdev_standby_response_cmdid;
+       u32 vdev_resume_response_cmdid;
+       u32 wlan_peer_caching_add_peer_cmdid;
+       u32 wlan_peer_caching_evict_peer_cmdid;
+       u32 wlan_peer_caching_restore_peer_cmdid;
+       u32 wlan_peer_caching_print_all_peers_info_cmdid;
+       u32 peer_update_wds_entry_cmdid;
+       u32 peer_add_proxy_sta_entry_cmdid;
+       u32 rtt_keepalive_cmdid;
+       u32 oem_req_cmdid;
+       u32 nan_cmdid;
+       u32 vdev_ratemask_cmdid;
+       u32 qboost_cfg_cmdid;
+       u32 pdev_smart_ant_enable_cmdid;
+       u32 pdev_smart_ant_set_rx_antenna_cmdid;
+       u32 peer_smart_ant_set_tx_antenna_cmdid;
+       u32 peer_smart_ant_set_train_info_cmdid;
+       u32 peer_smart_ant_set_node_config_ops_cmdid;
+       u32 pdev_set_antenna_switch_table_cmdid;
+       u32 pdev_set_ctl_table_cmdid;
+       u32 pdev_set_mimogain_table_cmdid;
+       u32 pdev_ratepwr_table_cmdid;
+       u32 pdev_ratepwr_chainmsk_table_cmdid;
+       u32 pdev_fips_cmdid;
+       u32 tt_set_conf_cmdid;
+       u32 fwtest_cmdid;
+       u32 vdev_atf_request_cmdid;
+       u32 peer_atf_request_cmdid;
+       u32 pdev_get_ani_cck_config_cmdid;
+       u32 pdev_get_ani_ofdm_config_cmdid;
+       u32 pdev_reserve_ast_entry_cmdid;
+       u32 pdev_get_nfcal_power_cmdid;
+       u32 pdev_get_tpc_cmdid;
+       u32 pdev_get_ast_info_cmdid;
+       u32 vdev_set_dscp_tid_map_cmdid;
+       u32 pdev_get_info_cmdid;
+       u32 vdev_get_info_cmdid;
+       u32 vdev_filter_neighbor_rx_packets_cmdid;
+       u32 mu_cal_start_cmdid;
+       u32 set_cca_params_cmdid;
+       u32 pdev_bss_chan_info_request_cmdid;
 };
 
 /*
@@ -1220,6 +1408,216 @@ enum wmi_10_2_event_id {
        WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1,
 };
 
+enum wmi_10_4_cmd_id {
+       WMI_10_4_START_CMDID = 0x9000,
+       WMI_10_4_END_CMDID = 0x9FFF,
+       WMI_10_4_INIT_CMDID,
+       WMI_10_4_START_SCAN_CMDID = WMI_10_4_START_CMDID,
+       WMI_10_4_STOP_SCAN_CMDID,
+       WMI_10_4_SCAN_CHAN_LIST_CMDID,
+       WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
+       WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
+       WMI_10_4_ECHO_CMDID,
+       WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
+       WMI_10_4_PDEV_SET_CHANNEL_CMDID,
+       WMI_10_4_PDEV_SET_PARAM_CMDID,
+       WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
+       WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
+       WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
+       WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
+       WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
+       WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
+       WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
+       WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
+       WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+       WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
+       WMI_10_4_VDEV_CREATE_CMDID,
+       WMI_10_4_VDEV_DELETE_CMDID,
+       WMI_10_4_VDEV_START_REQUEST_CMDID,
+       WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
+       WMI_10_4_VDEV_UP_CMDID,
+       WMI_10_4_VDEV_STOP_CMDID,
+       WMI_10_4_VDEV_DOWN_CMDID,
+       WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
+       WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
+       WMI_10_4_VDEV_SET_PARAM_CMDID,
+       WMI_10_4_VDEV_INSTALL_KEY_CMDID,
+       WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
+       WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
+       WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
+       WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
+       WMI_10_4_PEER_CREATE_CMDID,
+       WMI_10_4_PEER_DELETE_CMDID,
+       WMI_10_4_PEER_FLUSH_TIDS_CMDID,
+       WMI_10_4_PEER_SET_PARAM_CMDID,
+       WMI_10_4_PEER_ASSOC_CMDID,
+       WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
+       WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
+       WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
+       WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+       WMI_10_4_PEER_MCAST_GROUP_CMDID,
+       WMI_10_4_BCN_TX_CMDID,
+       WMI_10_4_PDEV_SEND_BCN_CMDID,
+       WMI_10_4_BCN_PRB_TMPL_CMDID,
+       WMI_10_4_BCN_FILTER_RX_CMDID,
+       WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
+       WMI_10_4_MGMT_TX_CMDID,
+       WMI_10_4_PRB_TMPL_CMDID,
+       WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
+       WMI_10_4_ADDBA_SEND_CMDID,
+       WMI_10_4_ADDBA_STATUS_CMDID,
+       WMI_10_4_DELBA_SEND_CMDID,
+       WMI_10_4_ADDBA_SET_RESP_CMDID,
+       WMI_10_4_SEND_SINGLEAMSDU_CMDID,
+       WMI_10_4_STA_POWERSAVE_MODE_CMDID,
+       WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
+       WMI_10_4_STA_MIMO_PS_MODE_CMDID,
+       WMI_10_4_DBGLOG_CFG_CMDID,
+       WMI_10_4_PDEV_DFS_ENABLE_CMDID,
+       WMI_10_4_PDEV_DFS_DISABLE_CMDID,
+       WMI_10_4_PDEV_QVIT_CMDID,
+       WMI_10_4_ROAM_SCAN_MODE,
+       WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
+       WMI_10_4_ROAM_SCAN_PERIOD,
+       WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+       WMI_10_4_ROAM_AP_PROFILE,
+       WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
+       WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
+       WMI_10_4_OFL_SCAN_PERIOD,
+       WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
+       WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
+       WMI_10_4_P2P_GO_SET_BEACON_IE,
+       WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
+       WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
+       WMI_10_4_AP_PS_PEER_PARAM_CMDID,
+       WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
+       WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
+       WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
+       WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+       WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+       WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+       WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+       WMI_10_4_PDEV_SUSPEND_CMDID,
+       WMI_10_4_PDEV_RESUME_CMDID,
+       WMI_10_4_ADD_BCN_FILTER_CMDID,
+       WMI_10_4_RMV_BCN_FILTER_CMDID,
+       WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
+       WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
+       WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+       WMI_10_4_WOW_ENABLE_CMDID,
+       WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+       WMI_10_4_RTT_MEASREQ_CMDID,
+       WMI_10_4_RTT_TSF_CMDID,
+       WMI_10_4_RTT_KEEPALIVE_CMDID,
+       WMI_10_4_OEM_REQ_CMDID,
+       WMI_10_4_NAN_CMDID,
+       WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+       WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+       WMI_10_4_REQUEST_STATS_CMDID,
+       WMI_10_4_GPIO_CONFIG_CMDID,
+       WMI_10_4_GPIO_OUTPUT_CMDID,
+       WMI_10_4_VDEV_RATEMASK_CMDID,
+       WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
+       WMI_10_4_GTK_OFFLOAD_CMDID,
+       WMI_10_4_QBOOST_CFG_CMDID,
+       WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
+       WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
+       WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+       WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+       WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+       WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+       WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
+       WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
+       WMI_10_4_FORCE_FW_HANG_CMDID,
+       WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+       WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
+       WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+       WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
+       WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+       WMI_10_4_PDEV_FIPS_CMDID,
+       WMI_10_4_TT_SET_CONF_CMDID,
+       WMI_10_4_FWTEST_CMDID,
+       WMI_10_4_VDEV_ATF_REQUEST_CMDID,
+       WMI_10_4_PEER_ATF_REQUEST_CMDID,
+       WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+       WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+       WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
+       WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
+       WMI_10_4_PDEV_GET_TPC_CMDID,
+       WMI_10_4_PDEV_GET_AST_INFO_CMDID,
+       WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
+       WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
+       WMI_10_4_PDEV_GET_INFO_CMDID,
+       WMI_10_4_VDEV_GET_INFO_CMDID,
+       WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+       WMI_10_4_MU_CAL_START_CMDID,
+       WMI_10_4_SET_CCA_PARAMS_CMDID,
+       WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+       WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1,
+};
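
One subtlety in the enum above follows directly from C enumeration rules: WMI_10_4_INIT_CMDID carries no explicit value and is declared right after WMI_10_4_END_CMDID, so it evaluates to 0x9FFF + 1 = 0xA000, outside the 0x9000-0x9FFF command window, while WMI_10_4_START_SCAN_CMDID re-anchors the implicit numbering at 0x9000. A compile-time check (placed inside any function) makes the consequence concrete:

	BUILD_BUG_ON(WMI_10_4_INIT_CMDID != 0xA000);
	BUILD_BUG_ON(WMI_10_4_STOP_SCAN_CMDID != 0x9001);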
+
+enum wmi_10_4_event_id {
+       WMI_10_4_SERVICE_READY_EVENTID = 0x8000,
+       WMI_10_4_READY_EVENTID,
+       WMI_10_4_DEBUG_MESG_EVENTID,
+       WMI_10_4_START_EVENTID = 0x9000,
+       WMI_10_4_END_EVENTID = 0x9FFF,
+       WMI_10_4_SCAN_EVENTID = WMI_10_4_START_EVENTID,
+       WMI_10_4_ECHO_EVENTID,
+       WMI_10_4_UPDATE_STATS_EVENTID,
+       WMI_10_4_INST_RSSI_STATS_EVENTID,
+       WMI_10_4_VDEV_START_RESP_EVENTID,
+       WMI_10_4_VDEV_STANDBY_REQ_EVENTID,
+       WMI_10_4_VDEV_RESUME_REQ_EVENTID,
+       WMI_10_4_VDEV_STOPPED_EVENTID,
+       WMI_10_4_PEER_STA_KICKOUT_EVENTID,
+       WMI_10_4_HOST_SWBA_EVENTID,
+       WMI_10_4_TBTTOFFSET_UPDATE_EVENTID,
+       WMI_10_4_MGMT_RX_EVENTID,
+       WMI_10_4_CHAN_INFO_EVENTID,
+       WMI_10_4_PHYERR_EVENTID,
+       WMI_10_4_ROAM_EVENTID,
+       WMI_10_4_PROFILE_MATCH,
+       WMI_10_4_DEBUG_PRINT_EVENTID,
+       WMI_10_4_PDEV_QVIT_EVENTID,
+       WMI_10_4_WLAN_PROFILE_DATA_EVENTID,
+       WMI_10_4_RTT_MEASUREMENT_REPORT_EVENTID,
+       WMI_10_4_TSF_MEASUREMENT_REPORT_EVENTID,
+       WMI_10_4_RTT_ERROR_REPORT_EVENTID,
+       WMI_10_4_RTT_KEEPALIVE_EVENTID,
+       WMI_10_4_OEM_CAPABILITY_EVENTID,
+       WMI_10_4_OEM_MEASUREMENT_REPORT_EVENTID,
+       WMI_10_4_OEM_ERROR_REPORT_EVENTID,
+       WMI_10_4_NAN_EVENTID,
+       WMI_10_4_WOW_WAKEUP_HOST_EVENTID,
+       WMI_10_4_GTK_OFFLOAD_STATUS_EVENTID,
+       WMI_10_4_GTK_REKEY_FAIL_EVENTID,
+       WMI_10_4_DCS_INTERFERENCE_EVENTID,
+       WMI_10_4_PDEV_TPC_CONFIG_EVENTID,
+       WMI_10_4_CSA_HANDLING_EVENTID,
+       WMI_10_4_GPIO_INPUT_EVENTID,
+       WMI_10_4_PEER_RATECODE_LIST_EVENTID,
+       WMI_10_4_GENERIC_BUFFER_EVENTID,
+       WMI_10_4_MCAST_BUF_RELEASE_EVENTID,
+       WMI_10_4_MCAST_LIST_AGEOUT_EVENTID,
+       WMI_10_4_VDEV_GET_KEEPALIVE_EVENTID,
+       WMI_10_4_WDS_PEER_EVENTID,
+       WMI_10_4_PEER_STA_PS_STATECHG_EVENTID,
+       WMI_10_4_PDEV_FIPS_EVENTID,
+       WMI_10_4_TT_STATS_EVENTID,
+       WMI_10_4_PDEV_CHANNEL_HOPPING_EVENTID,
+       WMI_10_4_PDEV_ANI_CCK_LEVEL_EVENTID,
+       WMI_10_4_PDEV_ANI_OFDM_LEVEL_EVENTID,
+       WMI_10_4_PDEV_RESERVE_AST_ENTRY_EVENTID,
+       WMI_10_4_PDEV_NFCAL_POWER_EVENTID,
+       WMI_10_4_PDEV_TPC_EVENTID,
+       WMI_10_4_PDEV_GET_AST_INFO_EVENTID,
+       WMI_10_4_PDEV_TEMPERATURE_EVENTID,
+       WMI_10_4_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID,
+       WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID,
+       WMI_10_4_PDEV_UTF_EVENTID = WMI_10_4_END_EVENTID - 1,
+};
+
 enum wmi_phy_mode {
        MODE_11A        = 0,   /* 11a Mode */
        MODE_11G        = 1,   /* 11b/g Mode */
@@ -1349,7 +1747,8 @@ enum wmi_channel_change_cause {
 /* Indicate reason for channel switch */
 #define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13)
 
-#define WMI_MAX_SPATIAL_STREAM   3
+#define WMI_MAX_SPATIAL_STREAM        3 /* default max ss */
+#define WMI_10_4_MAX_SPATIAL_STREAM   4
 
 /* HT Capabilities*/
 #define WMI_HT_CAP_ENABLED                0x0001   /* HT Enabled/ disabled */
@@ -1979,8 +2378,224 @@ struct wmi_resource_config_10_2 {
        __le32 feature_mask;
 } __packed;
 
-#define NUM_UNITS_IS_NUM_VDEVS   0x1
-#define NUM_UNITS_IS_NUM_PEERS   0x2
+#define NUM_UNITS_IS_NUM_VDEVS         BIT(0)
+#define NUM_UNITS_IS_NUM_PEERS         BIT(1)
+#define NUM_UNITS_IS_NUM_ACTIVE_PEERS  BIT(2)
+
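
These unit flags qualify host-memory requests from firmware; a sketch of how a host-side handler might size a chunk from them (variable names are illustrative, not from this patch):

	u32 num_units = 0;

	if (unit_info & NUM_UNITS_IS_NUM_VDEVS)
		num_units = ar->max_num_vdevs + 1;
	else if (unit_info & NUM_UNITS_IS_NUM_PEERS)
		num_units = ar->max_num_peers + 1;
	else if (unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS)
		num_units = ar->num_active_peers + 1;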
+struct wmi_resource_config_10_4 {
+       /* Number of virtual devices (VAPs) to support */
+       __le32 num_vdevs;
+
+       /* Number of peer nodes to support */
+       __le32 num_peers;
+
+       /* Number of active peer nodes to support */
+       __le32 num_active_peers;
+
+       /* In offload mode, the target supports features like WOW, chatter and
+        * other protocol offloads. To support them, some functionality such as
+        * rx reorder buffering and PN checking needs to be done on the target.
+        * This determines the maximum number of peers supported by the target
+        * in offload mode.
+        */
+       __le32 num_offload_peers;
+
+       /* Number of reorder buffers available for doing target based reorder
+        * Rx reorder buffering
+        */
+       __le32 num_offload_reorder_buffs;
+
+       /* Number of keys per peer */
+       __le32 num_peer_keys;
+
+       /* Total number of TX/RX data TIDs */
+       __le32 num_tids;
+
+       /* Max skid for resolving hash collisions.
+        * The address search table is sparse, so that if two MAC addresses
+        * result in the same hash value, the second of these conflicting
+        * entries can slide to the next index in the address search table,
+        * and use it, if it is unoccupied.  This ast_skid_limit parameter
+        * specifies the upper bound on how many subsequent indices to search
+        * over to find an unoccupied space.
+        */
+       __le32 ast_skid_limit;
+
+       /* The nominal chain mask for transmit.
+        * The chain mask may be modified dynamically, e.g. to operate AP tx
+        * with a reduced number of chains if no clients are associated.
+        * This configuration parameter specifies the nominal chain-mask that
+        * should be used when not operating with a reduced set of tx chains.
+        */
+       __le32 tx_chain_mask;
+
+       /* The nominal chain mask for receive.
+        * The chain mask may be modified dynamically, e.g. for a client to use
+        * a reduced number of chains for receive if the traffic to the client
+        * is low enough that it doesn't require downlink MIMO or antenna
+        * diversity. This configuration parameter specifies the nominal
+        * chain-mask that should be used when not operating with a reduced
+        * set of rx chains.
+        */
+       __le32 rx_chain_mask;
+
+       /* What rx reorder timeout (ms) to use for the AC.
+        * Each WMM access class (voice, video, best-effort, background) will
+        * have its own timeout value to dictate how long to wait for missing
+        * rx MPDUs to arrive before flushing subsequent MPDUs that have already
+        * been received. This parameter specifies the timeout in milliseconds
+        * for each class.
+        */
+       __le32 rx_timeout_pri[4];
+
+       /* What mode the rx should decap packets to.
+        * MAC can decap to RAW (no decap), native wifi or Ethernet types.
+        * This setting also determines the default TX behavior; however, TX
+        * behavior can be modified on a per-VAP basis during VAP init.
+        */
+       __le32 rx_decap_mode;
+
+       __le32 scan_max_pending_req;
+
+       __le32 bmiss_offload_max_vdev;
+
+       __le32 roam_offload_max_vdev;
+
+       __le32 roam_offload_max_ap_profiles;
+
+       /* How many groups to use for mcast->ucast conversion.
+        * The target's WAL maintains a table to hold information regarding
+        * which peers belong to a given multicast group, so that if
+        * multicast->unicast conversion is enabled, the target can convert
+        * multicast tx frames to a series of unicast tx frames, to each peer
+        * within the multicast group. This num_mcast_groups configuration
+        * parameter tells the target how many multicast groups to provide
+        * storage for within its multicast group membership table.
+        */
+       __le32 num_mcast_groups;
+
+       /* Size to alloc for the mcast membership table.
+        * This num_mcast_table_elems configuration parameter tells the target
+        * how many peer elements it needs to provide storage for in its
+        * multicast group membership table. These multicast group membership
+        * table elements are shared by the multicast groups stored within
+        * the table.
+        */
+       __le32 num_mcast_table_elems;
+
+       /* Whether/how to do multicast->unicast conversion.
+        * This configuration parameter specifies whether the target should
+        * perform multicast --> unicast conversion on transmit, and if so,
+        * what to do if it finds no entries in its multicast group membership
+        * table for the multicast IP address in the tx frame.
+        * Configuration value:
+        * 0 -> Do not perform multicast to unicast conversion.
+        * 1 -> Convert multicast frames to unicast, if the IP multicast address
+        *      from the tx frame is found in the multicast group membership
+        *      table.  If the IP multicast address is not found, drop the frame
+        * 2 -> Convert multicast frames to unicast, if the IP multicast address
+        *      from the tx frame is found in the multicast group membership
+        *      table.  If the IP multicast address is not found, transmit the
+        *      frame as multicast.
+        */
+       __le32 mcast2ucast_mode;
+
+       /* How much memory to allocate for a tx PPDU dbg log.
+        * This parameter controls how much memory the target will allocate to
+        * store a log of tx PPDU meta-information (how large the PPDU was,
+        * when it was sent, whether it was successful, etc.)
+        */
+       __le32 tx_dbg_log_size;
+
+       /* How many AST entries to be allocated for WDS */
+       __le32 num_wds_entries;
+
+       /* MAC DMA burst size. 0 - default, 1 - 256B */
+       __le32 dma_burst_size;
+
+       /* Fixed delimiters to be inserted after every MPDU to account for
+        * interface latency and avoid underrun.
+        */
+       __le32 mac_aggr_delim;
+
+       /* Determine whether target is responsible for detecting duplicate
+        * non-aggregate MPDU and timing out stale fragments. A-MPDU reordering
+        * is always performed on the target.
+        *
+        * 0: target responsible for frag timeout and dup checking
+        * 1: host responsible for frag timeout and dup checking
+        */
+       __le32 rx_skip_defrag_timeout_dup_detection_check;
+
+       /* Configuration for VoW: number of video nodes to be supported and
+        * max number of descriptors for each video link (node).
+        */
+       __le32 vow_config;
+
+       /* Maximum vdev that could use gtk offload */
+       __le32 gtk_offload_max_vdev;
+
+       /* Number of msdu descriptors target should use */
+       __le32 num_msdu_desc;
+
+       /* Max number of tx fragments per MSDU.
+        * This is passed by the target as part of the WMI_SERVICE_READY
+        * event and is overridden by the OS shim as required.
+        */
+       __le32 max_frag_entries;
+
+       /* Max number of extended peer stats.
+        * This parameter controls the max number of peers for which extended
+        * statistics are supported by target
+        */
+       __le32 max_peer_ext_stats;
+
+       /* Smart antenna capabilities information.
+        * 1 - Smart antenna is enabled
+        * 0 - Smart antenna is disabled
+        * In future this can contain smart antenna specific capabilities.
+        */
+       __le32 smart_ant_cap;
+
+       /* User can configure the buffers allocated for each AC (BE, BK, VI, VO)
+        * during init.
+        */
+       __le32 bk_minfree;
+       __le32 be_minfree;
+       __le32 vi_minfree;
+       __le32 vo_minfree;
+
+       /* Rx batch mode capability.
+        * 1 - Rx batch mode enabled
+        * 0 - Rx batch mode disabled
+        */
+       __le32 rx_batchmode;
+
+       /* Thermal throttling capability.
+        * 1 - Capable of thermal throttling
+        * 0 - Not capable of thermal throttling
+        */
+       __le32 tt_support;
+
+       /* ATF configuration.
+        * 1  - Enable ATF
+        * 0  - Disable ATF
+        */
+       __le32 atf_config;
+
+       /* Configure padding to manage IP header misalignment
+        * 1  - Enable padding
+        * 0  - Disable padding
+        */
+       __le32 iphdr_pad_config;
+
+       /* qwrap configuration
+        * 1  - This is qwrap configuration
+        * 0  - This is not qwrap
+        */
+       __le32 qwrap_config;
+} __packed;
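
The mcast2ucast_mode values documented above map naturally onto a small enum; the names below are illustrative, only the numeric values come from the comment:

	enum sketch_mcast2ucast_mode {
		SKETCH_MCAST2UCAST_DISABLED      = 0, /* no conversion */
		SKETCH_MCAST2UCAST_DROP_ON_MISS  = 1, /* convert; drop on table miss */
		SKETCH_MCAST2UCAST_MCAST_ON_MISS = 2, /* convert; send mcast on miss */
	};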
 
 /* structure describing host memory chunk. */
 struct host_memory_chunk {
@@ -2014,6 +2629,11 @@ struct wmi_init_cmd_10_2 {
        struct wmi_host_mem_chunks mem_chunks;
 } __packed;
 
+struct wmi_init_cmd_10_4 {
+       struct wmi_resource_config_10_4 resource_config;
+       struct wmi_host_mem_chunks mem_chunks;
+} __packed;
+
 struct wmi_chan_list_entry {
        __le16 freq;
        u8 phy_mode; /* valid for 10.2 only */
@@ -2260,15 +2880,17 @@ enum wmi_bss_filter {
 };
 
 enum wmi_scan_event_type {
-       WMI_SCAN_EVENT_STARTED         = 0x1,
-       WMI_SCAN_EVENT_COMPLETED       = 0x2,
-       WMI_SCAN_EVENT_BSS_CHANNEL     = 0x4,
-       WMI_SCAN_EVENT_FOREIGN_CHANNEL = 0x8,
-       WMI_SCAN_EVENT_DEQUEUED        = 0x10,
-       WMI_SCAN_EVENT_PREEMPTED       = 0x20, /* possibly by high-prio scan */
-       WMI_SCAN_EVENT_START_FAILED    = 0x40,
-       WMI_SCAN_EVENT_RESTARTED       = 0x80,
-       WMI_SCAN_EVENT_MAX             = 0x8000
+       WMI_SCAN_EVENT_STARTED              = BIT(0),
+       WMI_SCAN_EVENT_COMPLETED            = BIT(1),
+       WMI_SCAN_EVENT_BSS_CHANNEL          = BIT(2),
+       WMI_SCAN_EVENT_FOREIGN_CHANNEL      = BIT(3),
+       WMI_SCAN_EVENT_DEQUEUED             = BIT(4),
+       /* possibly by high-prio scan */
+       WMI_SCAN_EVENT_PREEMPTED            = BIT(5),
+       WMI_SCAN_EVENT_START_FAILED         = BIT(6),
+       WMI_SCAN_EVENT_RESTARTED            = BIT(7),
+       WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT = BIT(8),
+       WMI_SCAN_EVENT_MAX                  = BIT(15),
 };
 
 enum wmi_scan_completion_reason {
@@ -2276,6 +2898,7 @@ enum wmi_scan_completion_reason {
        WMI_SCAN_REASON_CANCELLED,
        WMI_SCAN_REASON_PREEMPTED,
        WMI_SCAN_REASON_TIMEDOUT,
+       WMI_SCAN_REASON_INTERNAL_FAILURE,
        WMI_SCAN_REASON_MAX,
 };
 
@@ -2329,6 +2952,21 @@ struct wmi_mgmt_rx_event_v2 {
        u8 buf[0];
 } __packed;
 
+struct wmi_10_4_mgmt_rx_hdr {
+       __le32 channel;
+       __le32 snr;
+       u8 rssi_ctl[4];
+       __le32 rate;
+       __le32 phy_mode;
+       __le32 buf_len;
+       __le32 status;
+} __packed;
+
+struct wmi_10_4_mgmt_rx_event {
+       struct wmi_10_4_mgmt_rx_hdr hdr;
+       u8 buf[0];
+} __packed;
+
 #define WMI_RX_STATUS_OK                       0x00
 #define WMI_RX_STATUS_ERR_CRC                  0x01
 #define WMI_RX_STATUS_ERR_DECRYPT              0x08
@@ -2613,6 +3251,48 @@ struct wmi_pdev_param_map {
        u32 burst_dur;
        u32 burst_enable;
        u32 cal_period;
+       u32 aggr_burst;
+       u32 rx_decap_mode;
+       u32 smart_antenna_default_antenna;
+       u32 igmpmld_override;
+       u32 igmpmld_tid;
+       u32 antenna_gain;
+       u32 rx_filter;
+       u32 set_mcast_to_ucast_tid;
+       u32 proxy_sta_mode;
+       u32 set_mcast2ucast_mode;
+       u32 set_mcast2ucast_buffer;
+       u32 remove_mcast2ucast_buffer;
+       u32 peer_sta_ps_statechg_enable;
+       u32 igmpmld_ac_override;
+       u32 block_interbss;
+       u32 set_disable_reset_cmdid;
+       u32 set_msdu_ttl_cmdid;
+       u32 set_ppdu_duration_cmdid;
+       u32 txbf_sound_period_cmdid;
+       u32 set_promisc_mode_cmdid;
+       u32 set_burst_mode_cmdid;
+       u32 en_stats;
+       u32 mu_group_policy;
+       u32 noise_detection;
+       u32 noise_threshold;
+       u32 dpd_enable;
+       u32 set_mcast_bcast_echo;
+       u32 atf_strict_sch;
+       u32 atf_sched_duration;
+       u32 ant_plzn;
+       u32 mgmt_retry_limit;
+       u32 sensitivity_level;
+       u32 signed_txpower_2g;
+       u32 signed_txpower_5g;
+       u32 enable_per_tid_amsdu;
+       u32 enable_per_tid_ampdu;
+       u32 cca_threshold;
+       u32 rts_fixed_rate;
+       u32 pdev_reset;
+       u32 wapi_mbssid_offset;
+       u32 arp_srcaddr;
+       u32 arp_dstaddr;
 };
 
 #define WMI_PDEV_PARAM_UNSUPPORTED 0
@@ -2828,6 +3508,100 @@ enum wmi_10x_pdev_param {
        WMI_10X_PDEV_PARAM_CAL_PERIOD
 };
 
+enum wmi_10_4_pdev_param {
+       WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+       WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
+       WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
+       WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
+       WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
+       WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
+       WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
+       WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+       WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
+       WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
+       WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+       WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
+       WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
+       WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+       WMI_10_4_PDEV_PARAM_LTR_ENABLE,
+       WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
+       WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
+       WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
+       WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
+       WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+       WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+       WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
+       WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+       WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
+       WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
+       WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+       WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+       WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+       WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+       WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+       WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+       WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+       WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+       WMI_10_4_PDEV_PARAM_PMF_QOS,
+       WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
+       WMI_10_4_PDEV_PARAM_DCS,
+       WMI_10_4_PDEV_PARAM_ANI_ENABLE,
+       WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
+       WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
+       WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
+       WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
+       WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
+       WMI_10_4_PDEV_PARAM_PROXY_STA,
+       WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
+       WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
+       WMI_10_4_PDEV_PARAM_AGGR_BURST,
+       WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
+       WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
+       WMI_10_4_PDEV_PARAM_BURST_DUR,
+       WMI_10_4_PDEV_PARAM_BURST_ENABLE,
+       WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+       WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
+       WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
+       WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
+       WMI_10_4_PDEV_PARAM_RX_FILTER,
+       WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
+       WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
+       WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+       WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+       WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+       WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+       WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+       WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
+       WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+       WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+       WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+       WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+       WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+       WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
+       WMI_10_4_PDEV_PARAM_EN_STATS,
+       WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
+       WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
+       WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
+       WMI_10_4_PDEV_PARAM_DPD_ENABLE,
+       WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+       WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
+       WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
+       WMI_10_4_PDEV_PARAM_ANT_PLZN,
+       WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
+       WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
+       WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
+       WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
+       WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+       WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+       WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
+       WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
+       WMI_10_4_PDEV_PARAM_CAL_PERIOD,
+       WMI_10_4_PDEV_PARAM_PDEV_RESET,
+       WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+       WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
+       WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
+};
+
 struct wmi_pdev_set_param_cmd {
        __le32 param_id;
        __le32 param_value;
@@ -3506,6 +4280,22 @@ struct wmi_vdev_param_map {
        u32 drop_unencry;
        u32 tx_encap_type;
        u32 ap_detect_out_of_sync_sleeping_sta_time_secs;
+       u32 rc_num_retries;
+       u32 cabq_maxdur;
+       u32 mfptest_set;
+       u32 rts_fixed_rate;
+       u32 vht_sgimask;
+       u32 vht80_ratemask;
+       u32 early_rx_adjust_enable;
+       u32 early_rx_tgt_bmiss_num;
+       u32 early_rx_bmiss_sample_cycle;
+       u32 early_rx_slop_step;
+       u32 early_rx_init_slop;
+       u32 early_rx_adjust_pause;
+       u32 proxy_sta;
+       u32 meru_vc;
+       u32 rx_decap_type;
+       u32 bw_nss_ratemask;
 };
 
 #define WMI_VDEV_PARAM_UNSUPPORTED 0
@@ -3764,11 +4554,85 @@ enum wmi_10x_vdev_param {
        WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
 };
 
+enum wmi_10_4_vdev_param {
+       WMI_10_4_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+       WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+       WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
+       WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
+       WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
+       WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
+       WMI_10_4_VDEV_PARAM_SLOT_TIME,
+       WMI_10_4_VDEV_PARAM_PREAMBLE,
+       WMI_10_4_VDEV_PARAM_SWBA_TIME,
+       WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
+       WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
+       WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
+       WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
+       WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+       WMI_10_4_VDEV_PARAM_WDS,
+       WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
+       WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
+       WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
+       WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
+       WMI_10_4_VDEV_PARAM_FEATURE_WMM,
+       WMI_10_4_VDEV_PARAM_CHWIDTH,
+       WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
+       WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
+       WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
+       WMI_10_4_VDEV_PARAM_MGMT_RATE,
+       WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
+       WMI_10_4_VDEV_PARAM_FIXED_RATE,
+       WMI_10_4_VDEV_PARAM_SGI,
+       WMI_10_4_VDEV_PARAM_LDPC,
+       WMI_10_4_VDEV_PARAM_TX_STBC,
+       WMI_10_4_VDEV_PARAM_RX_STBC,
+       WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
+       WMI_10_4_VDEV_PARAM_DEF_KEYID,
+       WMI_10_4_VDEV_PARAM_NSS,
+       WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
+       WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
+       WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
+       WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
+       WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+       WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
+       WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
+       WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
+       WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
+       WMI_10_4_VDEV_PARAM_TXBF,
+       WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
+       WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
+       WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
+       WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+       WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
+       WMI_10_4_VDEV_PARAM_MFPTEST_SET,
+       WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
+       WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
+       WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
+       WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+       WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+       WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+       WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+       WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+       WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+       WMI_10_4_VDEV_PARAM_PROXY_STA,
+       WMI_10_4_VDEV_PARAM_MERU_VC,
+       WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
+       WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+};
+
 #define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
 #define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
 #define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
 #define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
 
+#define WMI_TXBF_STS_CAP_OFFSET_LSB    4
+#define WMI_TXBF_STS_CAP_OFFSET_MASK   0xf0
+#define WMI_BF_SOUND_DIM_OFFSET_LSB    8
+#define WMI_BF_SOUND_DIM_OFFSET_MASK   0xf00
+
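
The LSB/MASK pairs above follow the usual mask-then-shift idiom for packed parameter words; extracting both fields from a TXBF parameter word would look like this (the txbf variable is illustrative):

	u32 sts_cap = (txbf & WMI_TXBF_STS_CAP_OFFSET_MASK) >>
		      WMI_TXBF_STS_CAP_OFFSET_LSB;
	u32 snd_dim = (txbf & WMI_BF_SOUND_DIM_OFFSET_MASK) >>
		      WMI_BF_SOUND_DIM_OFFSET_LSB;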
 /* slot time long */
 #define WMI_VDEV_SLOT_TIME_LONG                0x1
 /* slot time short */
@@ -4305,6 +5169,14 @@ struct wmi_tim_info {
        __le32 tim_num_ps_pending;
 } __packed;
 
+struct wmi_tim_info_arg {
+       __le32 tim_len;
+       __le32 tim_mcast;
+       const __le32 *tim_bitmap;
+       __le32 tim_changed;
+       __le32 tim_num_ps_pending;
+} __packed;
+
 /* Maximum number of NOA Descriptors supported */
 #define WMI_P2P_MAX_NOA_DESCRIPTORS 4
 #define WMI_P2P_OPPPS_ENABLE_BIT       BIT(0)
@@ -4336,6 +5208,47 @@ struct wmi_host_swba_event {
        struct wmi_bcn_info bcn_info[0];
 } __packed;
 
+/* 512 clients / 32 bits per word = 16 words, plus 1 guard word = 17 */
+#define WMI_10_4_TIM_BITMAP_ARRAY_SIZE 17
+
+struct wmi_10_4_tim_info {
+       __le32 tim_len;
+       __le32 tim_mcast;
+       __le32 tim_bitmap[WMI_10_4_TIM_BITMAP_ARRAY_SIZE];
+       __le32 tim_changed;
+       __le32 tim_num_ps_pending;
+} __packed;
+
+#define WMI_10_4_P2P_MAX_NOA_DESCRIPTORS 1
+
+struct wmi_10_4_p2p_noa_info {
+       /* Bit 0 - Flag to indicate an update in NOA schedule
+        * Bits 7-1 - Reserved
+        */
+       u8 changed;
+       /* NOA index */
+       u8 index;
+       /* Bit 0 - Opp PS state of the AP
+        * Bits 1-7 - Ctwindow in TUs
+        */
+       u8 ctwindow_oppps;
+       /* Number of NOA descriptors */
+       u8 num_descriptors;
+
+       struct wmi_p2p_noa_descriptor
+               noa_descriptors[WMI_10_4_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
+struct wmi_10_4_bcn_info {
+       struct wmi_10_4_tim_info tim_info;
+       struct wmi_10_4_p2p_noa_info p2p_noa_info;
+} __packed;
+
+struct wmi_10_4_host_swba_event {
+       __le32 vdev_map;
+       struct wmi_10_4_bcn_info bcn_info[0];
+} __packed;
+
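
bcn_info[] is a variable-length tail carrying one entry per bit set in vdev_map, so a parser walks the bitmap rather than trusting a count field; the idiom, sketched:

	u32 map = __le32_to_cpu(ev->vdev_map);
	int i, n = 0;

	for (i = 0; map; map >>= 1, i++) {
		if (!(map & 0x1))
			continue;
		/* bcn_info[n] describes the vdev with id i */
		n++;
	}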
 #define WMI_MAX_AP_VDEV 16
 
 struct wmi_tbtt_offset_event {
@@ -4660,6 +5573,18 @@ struct wmi_chan_info_event {
        __le32 cycle_count;
 } __packed;
 
+struct wmi_10_4_chan_info_event {
+       __le32 err_code;
+       __le32 freq;
+       __le32 cmd_flags;
+       __le32 noise_floor;
+       __le32 rx_clear_count;
+       __le32 cycle_count;
+       __le32 chan_tx_pwr_range;
+       __le32 chan_tx_pwr_tp;
+       __le32 rx_frame_count;
+} __packed;
+
 struct wmi_peer_sta_kickout_event {
        struct wmi_mac_addr peer_macaddr;
 } __packed;
@@ -4840,6 +5765,9 @@ struct wmi_ch_info_ev_arg {
        __le32 noise_floor;
        __le32 rx_clear_count;
        __le32 cycle_count;
+       __le32 chan_tx_pwr_range;
+       __le32 chan_tx_pwr_tp;
+       __le32 rx_frame_count;
 };
 
 struct wmi_vdev_start_ev_arg {
@@ -4855,7 +5783,7 @@ struct wmi_peer_kick_ev_arg {
 
 struct wmi_swba_ev_arg {
        __le32 vdev_map;
-       const struct wmi_tim_info *tim_info[WMI_MAX_AP_VDEV];
+       struct wmi_tim_info_arg tim_info[WMI_MAX_AP_VDEV];
        const struct wmi_p2p_noa_info *noa_info[WMI_MAX_AP_VDEV];
 };
 
@@ -5085,6 +6013,12 @@ struct wmi_tdls_peer_capab_arg {
        u32 pref_offchan_bw;
 };
 
+enum wmi_txbf_conf {
+       WMI_TXBF_CONF_UNSUPPORTED,
+       WMI_TXBF_CONF_BEFORE_ASSOC,
+       WMI_TXBF_CONF_AFTER_ASSOC,
+};
+
 struct ath10k;
 struct ath10k_vif;
 struct ath10k_fw_stats_pdev;
index a68d8fd853a3bb6f314a5c26d8f480c8d00a1792..8e02b381990f138c7e18c70b4d2789f6460d868e 100644
@@ -301,8 +301,26 @@ int ath10k_wow_op_resume(struct ieee80211_hw *hw)
                ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
 
 exit:
+       if (ret) {
+               switch (ar->state) {
+               case ATH10K_STATE_ON:
+                       ar->state = ATH10K_STATE_RESTARTING;
+                       ret = 1;
+                       break;
+               case ATH10K_STATE_OFF:
+               case ATH10K_STATE_RESTARTING:
+               case ATH10K_STATE_RESTARTED:
+               case ATH10K_STATE_UTF:
+               case ATH10K_STATE_WEDGED:
+                       ath10k_warn(ar, "encountered unexpected device state %d on resume, cannot recover\n",
+                                   ar->state);
+                       ret = -EIO;
+                       break;
+               }
+       }
+
        mutex_unlock(&ar->conf_mutex);
-       return ret ? 1 : 0;
+       return ret;
 }
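
Worth noting for the hunk above: in mac80211, a resume callback returning 1 asks the stack for a full hardware reconfiguration rather than signalling an error, which is why ATH10K_STATE_ON is translated into ATH10K_STATE_RESTARTING with ret = 1, while the states that cannot be recovered return -EIO instead.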
 
 int ath10k_wow_init(struct ath10k *ar)
index 14cab1403dd6071d48179922346b676d89e9610b..112d8a9b8d4319d1a8cbc9dd956911a4c09b39f6 100644
@@ -427,7 +427,7 @@ struct htc_endpoint_credit_dist {
 };
 
 /*
- * credit distibution code that is passed into the distrbution function,
+ * credit distribution code that is passed into the distribution function,
  * there are mandatory and optional codes that must be handled
  */
 enum htc_credit_dist_reason {
index fc595b92ac56007a024bc9cb5d871e19cef4ba9c..c5f8bc4b5595ecda04fcb7b130f68180c6d74791 100644
 #define AR_PHY_MODE              (AR_SM_BASE + 0x8)
 #define AR_PHY_ACTIVE            (AR_SM_BASE + 0xc)
 #define AR_PHY_SPUR_MASK_A       (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x18 : 0x20))
-#define AR_PHY_SPUR_MASK_B       (AR_SM_BASE + 0x24)
+#define AR_PHY_SPUR_MASK_B       (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x1c : 0x24))
 #define AR_PHY_SPECTRAL_SCAN     (AR_SM_BASE + 0x28)
 #define AR_PHY_RADAR_BW_FILTER   (AR_SM_BASE + 0x2c)
 #define AR_PHY_SEARCH_START_DELAY (AR_SM_BASE + 0x30)
 #define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A                       0x3FF
 #define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A_S                     0
 
-#define AR_PHY_TEST              (AR_SM_BASE + 0x160)
+#define AR_PHY_TEST              (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x15c : 0x160))
 
 #define AR_PHY_TEST_BBB_OBS_SEL       0x780000
 #define AR_PHY_TEST_BBB_OBS_SEL_S     19
 #define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S          29
 
 
-#define AR_PHY_TSTDAC            (AR_SM_BASE + 0x168)
+#define AR_PHY_TSTDAC            (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x164 : 0x168))
 
-#define AR_PHY_CHAN_STATUS       (AR_SM_BASE + 0x16c)
+#define AR_PHY_CHAN_STATUS       (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x168 : 0x16c))
 
 #define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x16c : 0x170))
 #define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ    0x00000008
 #define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ_S  3
 
-#define AR_PHY_CHNINFO_NOISEPWR  (AR_SM_BASE + 0x174)
-#define AR_PHY_CHNINFO_GAINDIFF  (AR_SM_BASE + 0x178)
-#define AR_PHY_CHNINFO_FINETIM   (AR_SM_BASE + 0x17c)
-#define AR_PHY_CHAN_INFO_GAIN_0  (AR_SM_BASE + 0x180)
-#define AR_PHY_SCRAMBLER_SEED    (AR_SM_BASE + 0x190)
-#define AR_PHY_CCK_TX_CTRL       (AR_SM_BASE + 0x194)
+#define AR_PHY_CHNINFO_NOISEPWR  (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x170 : 0x174))
+#define AR_PHY_CHNINFO_GAINDIFF  (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x174 : 0x178))
+#define AR_PHY_CHNINFO_FINETIM   (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x178 : 0x17c))
+#define AR_PHY_CHAN_INFO_GAIN_0  (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x17c : 0x180))
+#define AR_PHY_SCRAMBLER_SEED    (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x184 : 0x190))
+#define AR_PHY_CCK_TX_CTRL       (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x188 : 0x194))
 
 #define AR_PHY_HEAVYCLIP_CTL     (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x198 : 0x1a4))
 #define AR_PHY_HEAVYCLIP_20      (AR_SM_BASE + 0x1a8)
 #define AR_PHY_HEAVYCLIP_40      (AR_SM_BASE + 0x1ac)
+#define AR_PHY_HEAVYCLIP_1       (AR_SM_BASE + 0x19c)
+#define AR_PHY_HEAVYCLIP_2       (AR_SM_BASE + 0x1a0)
+#define AR_PHY_HEAVYCLIP_3       (AR_SM_BASE + 0x1a4)
+#define AR_PHY_HEAVYCLIP_4       (AR_SM_BASE + 0x1a8)
+#define AR_PHY_HEAVYCLIP_5       (AR_SM_BASE + 0x1ac)
 #define AR_PHY_ILLEGAL_TXRATE    (AR_SM_BASE + 0x1b0)
 
 #define AR_PHY_POWER_TX_RATE(_d) (AR_SM_BASE + 0x1c0 + ((_d) << 2))
index a7a81b3969cec7e79b2cb73959c4d8ff1fb7489e..c85c47978e1e48e4dda029b1f9d55e5c04accf11 100644
@@ -172,14 +172,6 @@ struct ath_txq {
        struct sk_buff_head complete_q;
 };
 
-struct ath_atx_ac {
-       struct ath_txq *txq;
-       struct list_head list;
-       struct list_head tid_q;
-       bool clear_ps_filter;
-       bool sched;
-};
-
 struct ath_frame_info {
        struct ath_buf *bf;
        u16 framelen;
@@ -242,7 +234,7 @@ struct ath_atx_tid {
        struct sk_buff_head buf_q;
        struct sk_buff_head retry_q;
        struct ath_node *an;
-       struct ath_atx_ac *ac;
+       struct ath_txq *txq;
        unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
        u16 seq_start;
        u16 seq_next;
@@ -252,8 +244,8 @@ struct ath_atx_tid {
        int baw_tail;   /* next unused tx buffer slot */
 
        s8 bar_index;
-       bool sched;
        bool active;
+       bool clear_ps_filter;
 };
 
 struct ath_node {
@@ -261,7 +253,6 @@ struct ath_node {
        struct ieee80211_sta *sta; /* station struct we're part of */
        struct ieee80211_vif *vif; /* interface with which we're associated */
        struct ath_atx_tid tid[IEEE80211_NUM_TIDS];
-       struct ath_atx_ac ac[IEEE80211_NUM_ACS];
 
        u16 maxampdu;
        u8 mpdudensity;
@@ -410,6 +401,12 @@ enum ath_offchannel_state {
        ATH_OFFCHANNEL_ROC_DONE,
 };
 
+enum ath_roc_complete_reason {
+       ATH_ROC_COMPLETE_EXPIRE,
+       ATH_ROC_COMPLETE_ABORT,
+       ATH_ROC_COMPLETE_CANCEL,
+};
+
 struct ath_offchannel {
        struct ath_chanctx chan;
        struct timer_list timer;
@@ -471,7 +468,8 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
 void ath_chanctx_set_next(struct ath_softc *sc, bool force);
 void ath_offchannel_next(struct ath_softc *sc);
 void ath_scan_complete(struct ath_softc *sc, bool abort);
-void ath_roc_complete(struct ath_softc *sc, bool abort);
+void ath_roc_complete(struct ath_softc *sc,
+                     enum ath_roc_complete_reason reason);
 struct ath_chanctx* ath_is_go_chanctx_present(struct ath_softc *sc);
 
 #else
index 206665059d66a67b0bbd2a816ba4c2c4d187ee48..90f5773a1a614e34bceac511ba5d415d8614596b 100644 (file)
@@ -915,18 +915,27 @@ void ath_offchannel_next(struct ath_softc *sc)
        }
 }
 
-void ath_roc_complete(struct ath_softc *sc, bool abort)
+void ath_roc_complete(struct ath_softc *sc, enum ath_roc_complete_reason reason)
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 
-       if (abort)
+       sc->offchannel.roc_vif = NULL;
+       sc->offchannel.roc_chan = NULL;
+
+       switch (reason) {
+       case ATH_ROC_COMPLETE_ABORT:
                ath_dbg(common, CHAN_CTX, "RoC aborted\n");
-       else
+               ieee80211_remain_on_channel_expired(sc->hw);
+               break;
+       case ATH_ROC_COMPLETE_EXPIRE:
                ath_dbg(common, CHAN_CTX, "RoC expired\n");
+               ieee80211_remain_on_channel_expired(sc->hw);
+               break;
+       case ATH_ROC_COMPLETE_CANCEL:
+               ath_dbg(common, CHAN_CTX, "RoC canceled\n");
+               break;
+       }
 
-       sc->offchannel.roc_vif = NULL;
-       sc->offchannel.roc_chan = NULL;
-       ieee80211_remain_on_channel_expired(sc->hw);
        ath_offchannel_next(sc);
        ath9k_ps_restore(sc);
 }
@@ -1058,7 +1067,7 @@ static void ath_offchannel_timer(unsigned long data)
        case ATH_OFFCHANNEL_ROC_START:
        case ATH_OFFCHANNEL_ROC_WAIT:
                sc->offchannel.state = ATH_OFFCHANNEL_ROC_DONE;
-               ath_roc_complete(sc, false);
+               ath_roc_complete(sc, ATH_ROC_COMPLETE_EXPIRE);
                break;
        default:
                break;
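
The new enum splits the old boolean into three cases: EXPIRE and ABORT end the remain-on-channel from the driver side, so mac80211 is told via ieee80211_remain_on_channel_expired(); CANCEL is mac80211's own request (see the ath9k_cancel_remain_on_channel hunk further down), so echoing an expiry event back would be redundant. A minimal restatement of that contract, with the enum copied from the header hunk:

#include <stdbool.h>
#include <stdio.h>

enum ath_roc_complete_reason {
	ATH_ROC_COMPLETE_EXPIRE,
	ATH_ROC_COMPLETE_ABORT,
	ATH_ROC_COMPLETE_CANCEL,
};

/* Sketch: only driver-initiated completions notify mac80211. */
static bool roc_notifies_mac80211(enum ath_roc_complete_reason reason)
{
	return reason != ATH_ROC_COMPLETE_CANCEL;
}

int main(void)
{
	printf("expire -> %d, cancel -> %d\n",
	       roc_notifies_mac80211(ATH_ROC_COMPLETE_EXPIRE),
	       roc_notifies_mac80211(ATH_ROC_COMPLETE_CANCEL));
	return 0;
}
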
index dbf8f495964217e1b5799fb165155ff3c88b4894..da32c8faad94f1b58f2157c046e5db1bdb04f6f3 100644 (file)
@@ -765,6 +765,8 @@ static int read_file_reset(struct seq_file *file, void *data)
                [RESET_TYPE_BEACON_STUCK] = "Stuck Beacon",
                [RESET_TYPE_MCI] = "MCI Reset",
                [RESET_TYPE_CALIBRATION] = "Calibration error",
+               [RESET_TX_DMA_ERROR] = "Tx DMA stop error",
+               [RESET_RX_DMA_ERROR] = "Rx DMA stop error",
        };
        int i;
 
index a8e9319958e6eca7a645610c24c6b46adc142f3b..cd68c5f0e751a57ed6341e56d710234f92b0f5db 100644 (file)
@@ -50,6 +50,8 @@ enum ath_reset_type {
        RESET_TYPE_BEACON_STUCK,
        RESET_TYPE_MCI,
        RESET_TYPE_CALIBRATION,
+       RESET_TX_DMA_ERROR,
+       RESET_RX_DMA_ERROR,
        __RESET_TYPE_MAX
 };
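
This enum hunk pairs with the debugfs hunk above: read_file_reset() keeps a parallel string table indexed by the enum, and designated initializers keep the two in sync regardless of entry order, so every new reset type needs exactly one new table entry. A self-contained sketch of the pattern (enum abridged, strings taken from the debugfs hunk):

#include <stdio.h>

enum ath_reset_type {
	RESET_TYPE_BEACON_STUCK,
	RESET_TX_DMA_ERROR,
	RESET_RX_DMA_ERROR,
	__RESET_TYPE_MAX
};

static const char * const reset_names[__RESET_TYPE_MAX] = {
	[RESET_TYPE_BEACON_STUCK] = "Stuck Beacon",
	[RESET_TX_DMA_ERROR]      = "Tx DMA stop error",
	[RESET_RX_DMA_ERROR]      = "Rx DMA stop error",
};

int main(void)
{
	/* index by enum value, order of initializers is irrelevant */
	printf("%s\n", reset_names[RESET_RX_DMA_ERROR]);
	return 0;
}
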
 
index ffca918ff16aff4be941d572ac19d4f152e5b2c2..c2ca57a2ed09d0f14ee10520b8a8c27f5d5543c2 100644 (file)
@@ -26,12 +26,11 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
        struct ath_node *an = file->private_data;
        struct ath_softc *sc = an->sc;
        struct ath_atx_tid *tid;
-       struct ath_atx_ac *ac;
        struct ath_txq *txq;
        u32 len = 0, size = 4096;
        char *buf;
        size_t retval;
-       int tidno, acno;
+       int tidno;
 
        buf = kzalloc(size, GFP_KERNEL);
        if (buf == NULL)
@@ -48,19 +47,6 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
        len += scnprintf(buf + len, size - len, "MPDU Density: %d\n\n",
                         an->mpdudensity);
 
-       len += scnprintf(buf + len, size - len,
-                        "%2s%7s\n", "AC", "SCHED");
-
-       for (acno = 0, ac = &an->ac[acno];
-            acno < IEEE80211_NUM_ACS; acno++, ac++) {
-               txq = ac->txq;
-               ath_txq_lock(sc, txq);
-               len += scnprintf(buf + len, size - len,
-                                "%2d%7d\n",
-                                acno, ac->sched);
-               ath_txq_unlock(sc, txq);
-       }
-
        len += scnprintf(buf + len, size - len,
                         "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
                         "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
@@ -68,7 +54,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
 
        for (tidno = 0, tid = &an->tid[tidno];
             tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
-               txq = tid->ac->txq;
+               txq = tid->txq;
                ath_txq_lock(sc, txq);
                if (tid->active) {
                        len += scnprintf(buf + len, size - len,
@@ -80,7 +66,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
                                         tid->baw_head,
                                         tid->baw_tail,
                                         tid->bar_index,
-                                        tid->sched);
+                                        !list_empty(&tid->list));
                }
                ath_txq_unlock(sc, txq);
        }
index e98a9eaba7ff3f1b84a85945e63e901c8a216207..1ece42c2443d79e3d73e999b0ff5a6edb8b08fc7 100644 (file)
@@ -30,6 +30,157 @@ struct ath_radar_data {
        u8 pulse_length_pri;
 };
 
+/**** begin: CHIRP ************************************************************/
+
+/* min and max gradients for defined FCC chirping pulses, given by
+ * - 20MHz chirp width over a pulse width of  50us
+ * -  5MHz chirp width over a pulse width of 100us
+ */
+static const int BIN_DELTA_MIN         = 1;
+static const int BIN_DELTA_MAX         = 10;
+
+/* we need at least 3 deltas / 4 samples for reliable chirp detection */
+#define NUM_DIFFS 3
+static const int FFT_NUM_SAMPLES       = (NUM_DIFFS + 1);
+
+/* Threshold for difference of delta peaks */
+static const int MAX_DIFF              = 2;
+
+/* width range to be checked for chirping */
+static const int MIN_CHIRP_PULSE_WIDTH = 20;
+static const int MAX_CHIRP_PULSE_WIDTH = 110;
+
+struct ath9k_dfs_fft_20 {
+       u8 bin[28];
+       u8 lower_bins[3];
+} __packed;
+struct ath9k_dfs_fft_40 {
+       u8 bin[64];
+       u8 lower_bins[3];
+       u8 upper_bins[3];
+} __packed;
+
+static inline int fft_max_index(u8 *bins)
+{
+       return (bins[2] & 0xfc) >> 2;
+}
+static inline int fft_max_magnitude(u8 *bins)
+{
+       return (bins[0] & 0xc0) >> 6 | bins[1] << 2 | (bins[2] & 0x03) << 10;
+}
+static inline u8 fft_bitmap_weight(u8 *bins)
+{
+       return bins[0] & 0x3f;
+}
+
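+
Each 3-byte bin descriptor packs three fields: bits 0-5 of byte 0 carry the bitmap weight, the 12-bit peak magnitude is spread across all three bytes, and bits 2-7 of byte 2 give the index of the strongest FFT bin. A worked example with an invented byte triple:

#include <stdio.h>

/* Same unpacking as the inline helpers above. */
static int fft_max_index(const unsigned char *bins)
{
	return (bins[2] & 0xfc) >> 2;
}
static int fft_max_magnitude(const unsigned char *bins)
{
	return (bins[0] & 0xc0) >> 6 | bins[1] << 2 | (bins[2] & 0x03) << 10;
}
static int fft_bitmap_weight(const unsigned char *bins)
{
	return bins[0] & 0x3f;
}

int main(void)
{
	unsigned char bins[3] = { 0xc3, 0xab, 0x7e }; /* invented sample */

	printf("weight=%d magnitude=%d index=%d\n",
	       fft_bitmap_weight(bins),	/* 3 */
	       fft_max_magnitude(bins),	/* 2735 */
	       fft_max_index(bins));	/* 31 */
	return 0;
}
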
+static int ath9k_get_max_index_ht40(struct ath9k_dfs_fft_40 *fft,
+                                   bool is_ctl, bool is_ext)
+{
+       const int DFS_UPPER_BIN_OFFSET = 64;
+       /* if detected radar on both channels, select the significant one */
+       if (is_ctl && is_ext) {
+               /* first check whether the channels have 'strong' bins */
+               is_ctl = fft_bitmap_weight(fft->lower_bins) != 0;
+               is_ext = fft_bitmap_weight(fft->upper_bins) != 0;
+
+               /* if still unclear, take higher magnitude */
+               if (is_ctl && is_ext) {
+                       int mag_lower = fft_max_magnitude(fft->lower_bins);
+                       int mag_upper = fft_max_magnitude(fft->upper_bins);
+                       if (mag_upper > mag_lower)
+                               is_ctl = false;
+                       else
+                               is_ext = false;
+               }
+       }
+       if (is_ctl)
+               return fft_max_index(fft->lower_bins);
+       return fft_max_index(fft->upper_bins) + DFS_UPPER_BIN_OFFSET;
+}
+static bool ath9k_check_chirping(struct ath_softc *sc, u8 *data,
+                                int datalen, bool is_ctl, bool is_ext)
+{
+       int i;
+       int max_bin[FFT_NUM_SAMPLES];
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
+       int prev_delta;
+
+       if (IS_CHAN_HT40(ah->curchan)) {
+               struct ath9k_dfs_fft_40 *fft = (struct ath9k_dfs_fft_40 *) data;
+               int num_fft_packets = datalen / sizeof(*fft);
+               if (num_fft_packets == 0)
+                       return false;
+
+               ath_dbg(common, DFS, "HT40: datalen=%d, num_fft_packets=%d\n",
+                       datalen, num_fft_packets);
+               if (num_fft_packets < (FFT_NUM_SAMPLES)) {
+                       ath_dbg(common, DFS, "not enough packets for chirp\n");
+                       return false;
+               }
+               /* HW sometimes adds 2 garbage bytes in front of FFT samples */
+               if ((datalen % sizeof(*fft)) == 2) {
+                       fft = (struct ath9k_dfs_fft_40 *) (data + 2);
+                       ath_dbg(common, DFS, "fixing datalen by 2\n");
+               }
+               if (IS_CHAN_HT40MINUS(ah->curchan)) {
+                       int temp = is_ctl;
+                       is_ctl = is_ext;
+                       is_ext = temp;
+               }
+               for (i = 0; i < FFT_NUM_SAMPLES; i++)
+                       max_bin[i] = ath9k_get_max_index_ht40(fft + i, is_ctl,
+                                                             is_ext);
+       } else {
+               struct ath9k_dfs_fft_20 *fft = (struct ath9k_dfs_fft_20 *) data;
+               int num_fft_packets = datalen / sizeof(*fft);
+               if (num_fft_packets == 0)
+                       return false;
+               ath_dbg(common, DFS, "HT20: datalen=%d, num_fft_packets=%d\n",
+                       datalen, num_fft_packets);
+               if (num_fft_packets < (FFT_NUM_SAMPLES)) {
+                       ath_dbg(common, DFS, "not enough packets for chirp\n");
+                       return false;
+               }
+               /* in ht20, this is a 6-bit signed number => shift it to 0 */
+               for (i = 0; i < FFT_NUM_SAMPLES; i++)
+                       max_bin[i] = fft_max_index(fft[i].lower_bins) ^ 0x20;
+       }
+       ath_dbg(common, DFS, "bin_max = [%d, %d, %d, %d]\n",
+               max_bin[0], max_bin[1], max_bin[2], max_bin[3]);
+
+       /* Check for chirp attributes within specs
+        * a) delta of adjacent max_bins is within range
+        * b) delta of adjacent deltas is within tolerance
+        */
+       prev_delta = 0;
+       for (i = 0; i < NUM_DIFFS; i++) {
+               int ddelta = -1;
+               int delta = max_bin[i + 1] - max_bin[i];
+
+               /* ensure gradient is within valid range */
+               if (abs(delta) < BIN_DELTA_MIN || abs(delta) > BIN_DELTA_MAX) {
+                       ath_dbg(common, DFS, "CHIRP: invalid delta %d "
+                               "in sample %d\n", delta, i);
+                       return false;
+               }
+               if (i == 0)
+                       goto done;
+               ddelta = delta - prev_delta;
+               if (abs(ddelta) > MAX_DIFF) {
+                       ath_dbg(common, DFS, "CHIRP: ddelta %d too high\n",
+                               ddelta);
+                       return false;
+               }
+done:
+               ath_dbg(common, DFS, "CHIRP - %d: delta=%d, ddelta=%d\n",
+                       i, delta, ddelta);
+               prev_delta = delta;
+       }
+       return true;
+}
+/**** end: CHIRP **************************************************************/
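
To make the two acceptance criteria concrete: the peak bin must move by 1 to 10 bins between consecutive FFT samples (BIN_DELTA_MIN/MAX), and that gradient must stay steady to within MAX_DIFF. The userspace replica below applies the same checks to one plausible chirp and one non-chirp sequence; the bin values are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

/* Constants copied from the hunk above. */
#define NUM_DIFFS      3
#define BIN_DELTA_MIN  1
#define BIN_DELTA_MAX  10
#define MAX_DIFF       2

/* Userspace replica of the gradient checks in ath9k_check_chirping(). */
static int is_chirp(const int *max_bin)
{
	int i, prev_delta = 0;

	for (i = 0; i < NUM_DIFFS; i++) {
		int delta = max_bin[i + 1] - max_bin[i];

		if (abs(delta) < BIN_DELTA_MIN || abs(delta) > BIN_DELTA_MAX)
			return 0;	/* gradient out of range */
		if (i && abs(delta - prev_delta) > MAX_DIFF)
			return 0;	/* gradient not steady enough */
		prev_delta = delta;
	}
	return 1;
}

int main(void)
{
	int chirp[] = { 10, 13, 16, 19 }; /* steady +3 bins per sample */
	int noise[] = { 10, 25, 11, 30 }; /* first delta 15 > BIN_DELTA_MAX */

	printf("chirp=%d noise=%d\n", is_chirp(chirp), is_chirp(noise)); /* 1 0 */
	return 0;
}
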
+
 /* convert pulse duration to usecs, considering clock mode */
 static u32 dur_to_usecs(struct ath_hw *ah, u32 dur)
 {
@@ -113,12 +264,6 @@ ath9k_postprocess_radar_event(struct ath_softc *sc,
                return false;
        }
 
-       /*
-        * TODO: check chirping pulses
-        *       checks for chirping are dependent on the DFS regulatory domain
-        *       used, which is yet TBD
-        */
-
        /* convert duration to usecs */
        pe->width = dur_to_usecs(sc->sc_ah, dur);
        pe->rssi = rssi;
@@ -190,6 +335,16 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
        if (!ath9k_postprocess_radar_event(sc, &ard, &pe))
                return;
 
+       if (pe.width > MIN_CHIRP_PULSE_WIDTH &&
+           pe.width < MAX_CHIRP_PULSE_WIDTH) {
+               bool is_ctl = !!(ard.pulse_bw_info & PRI_CH_RADAR_FOUND);
+               bool is_ext = !!(ard.pulse_bw_info & EXT_CH_RADAR_FOUND);
+               int clen = datalen - 3;
+               pe.chirp = ath9k_check_chirping(sc, data, clen, is_ctl, is_ext);
+       } else {
+               pe.chirp = false;
+       }
+
        ath_dbg(common, DFS,
                "ath9k_dfs_process_phyerr: type=%d, freq=%d, ts=%llu, "
                "width=%d, rssi=%d, delta_ts=%llu\n",
@@ -198,7 +353,8 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
        sc->dfs_prev_pulse_ts = pe.ts;
        if (ard.pulse_bw_info & PRI_CH_RADAR_FOUND)
                ath9k_dfs_process_radar_pulse(sc, &pe);
-       if (ard.pulse_bw_info & EXT_CH_RADAR_FOUND) {
+       if (IS_CHAN_HT40(ah->curchan) &&
+           ard.pulse_bw_info & EXT_CH_RADAR_FOUND) {
                pe.freq += IS_CHAN_HT40PLUS(ah->curchan) ? 20 : -20;
                ath9k_dfs_process_radar_pulse(sc, &pe);
        }
index 39eaf9b6e9b45c610dbf40208151431bbf51b64c..1e84882f8c5b35374df2a6460aada9bfac1bfa5a 100644 (file)
@@ -74,7 +74,7 @@ static struct ath_ps_ops ath9k_htc_ps_ops = {
 
 static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv)
 {
-       int time_left;
+       unsigned long time_left;
 
        if (atomic_read(&priv->htc->tgt_ready) > 0) {
                atomic_dec(&priv->htc->tgt_ready);
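
This conversion, and the identical ones in the hunks below for htc_hst.c, link.c and wmi.c, follow from the API: wait_for_completion_timeout() returns an unsigned long (0 on timeout, otherwise the jiffies remaining), so an int local is the wrong type for its result. A kernel-style sketch of the intended pattern (not compilable on its own; wait_for_target here is an invented helper name):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Sketch: the local must be unsigned long to match the return type of
 * wait_for_completion_timeout(); 0 means the wait timed out. */
static int wait_for_target(struct completion *done)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(done, msecs_to_jiffies(1000));
	if (!time_left)
		return -ETIMEDOUT;

	return 0;
}
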
index d2408da38c1c1321354c1fb6f931cfb202250949..2294709ee8b0ae62144be926f55f4ac4f10a86c6 100644 (file)
@@ -146,7 +146,8 @@ static int htc_config_pipe_credits(struct htc_target *target)
 {
        struct sk_buff *skb;
        struct htc_config_pipe_msg *cp_msg;
-       int ret, time_left;
+       int ret;
+       unsigned long time_left;
 
        skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
        if (!skb) {
@@ -184,7 +185,8 @@ static int htc_setup_complete(struct htc_target *target)
 {
        struct sk_buff *skb;
        struct htc_comp_msg *comp_msg;
-       int ret = 0, time_left;
+       int ret = 0;
+       unsigned long time_left;
 
        skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
        if (!skb) {
@@ -236,7 +238,8 @@ int htc_connect_service(struct htc_target *target,
        struct sk_buff *skb;
        struct htc_endpoint *endpoint;
        struct htc_conn_svc_msg *conn_msg;
-       int ret, time_left;
+       int ret;
+       unsigned long time_left;
 
        /* Find an available endpoint */
        endpoint = get_next_avail_ep(target->endpoint);
index 5e15e8e10ed39f0b605783176fe260faa387d083..a31a6804dc34eff8174b06e15d8ee14bb5405888 100644 (file)
@@ -279,6 +279,7 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
                return;
        case AR9300_DEVID_QCA956X:
                ah->hw_version.macVersion = AR_SREV_VERSION_9561;
+               return;
        }
 
        val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
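
The one-line fix above is more than cosmetic: the QCA956X case assigned macVersion and then fell out of the switch into the AR_SREV register read that follows, which overwrote the value, unlike the earlier device-ID cases that already end in return. A userspace caricature of the failure mode:

#include <stdio.h>

/* Without the early return, control falls out of the switch and the
 * register-based path below it clobbers the value just assigned. */
static int read_version(int devid)
{
	int mac_version = 0;

	switch (devid) {
	case 0x9561:			/* stand-in for AR9300_DEVID_QCA956X */
		mac_version = 0x561;
		return mac_version;	/* the line the patch adds */
	}

	mac_version = -1;		/* stand-in for the REG_READ(AR_SREV) path */
	return mac_version;
}

int main(void)
{
	printf("%x\n", read_version(0x9561)); /* 561, not ffffffff */
	return 0;
}
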
index eff0e5325e6a304e376b71d33d6874318e087a53..57f95f2dca5b072ac294b2c76122ec1b952a2a80 100644 (file)
@@ -736,13 +736,14 @@ static const struct ieee80211_iface_limit if_limits_multi[] = {
                                 BIT(NL80211_IFTYPE_P2P_CLIENT) |
                                 BIT(NL80211_IFTYPE_P2P_GO) },
        { .max = 1,     .types = BIT(NL80211_IFTYPE_ADHOC) },
+       { .max = 1,     .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
 };
 
 static const struct ieee80211_iface_combination if_comb_multi[] = {
        {
                .limits = if_limits_multi,
                .n_limits = ARRAY_SIZE(if_limits_multi),
-               .max_interfaces = 2,
+               .max_interfaces = 3,
                .num_different_channels = 2,
                .beacon_int_infra_match = true,
        },
@@ -826,6 +827,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
        ieee80211_hw_set(hw, SIGNAL_DBM);
        ieee80211_hw_set(hw, RX_INCLUDES_FCS);
        ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+       ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
 
        if (ath9k_ps_enable)
                ieee80211_hw_set(hw, SUPPORTS_PS);
@@ -855,6 +857,10 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
                        BIT(NL80211_IFTYPE_MESH_POINT) |
                        BIT(NL80211_IFTYPE_WDS);
 
+               if (ath9k_is_chanctx_enabled())
+                       hw->wiphy->interface_modes |=
+                                       BIT(NL80211_IFTYPE_P2P_DEVICE);
+
                        hw->wiphy->iface_combinations = if_comb;
                        hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
        }
index 90631d768a60fa70a8239d529758edbbed9484c1..5ad0feeebc8669c530e6ae4aeba536f4375840e7 100644 (file)
@@ -172,7 +172,7 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_tx_control txctl;
-       int time_left;
+       unsigned long time_left;
 
        memset(&txctl, 0, sizeof(txctl));
        txctl.txq = sc->tx.txq_map[IEEE80211_AC_BE];
index cfd45cb8ccfc13df856bb4bbe07d8e2bef21228f..c27143ba9ffbe8e6d863c0353e3ef3effd29bd5e 100644 (file)
@@ -1459,13 +1459,18 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
                                   u64 multicast)
 {
        struct ath_softc *sc = hw->priv;
+       struct ath_chanctx *ctx;
        u32 rfilt;
 
        changed_flags &= SUPPORTED_FILTERS;
        *total_flags &= SUPPORTED_FILTERS;
 
        spin_lock_bh(&sc->chan_lock);
-       sc->cur_chan->rxfilter = *total_flags;
+       ath_for_each_chanctx(sc, ctx)
+               ctx->rxfilter = *total_flags;
+#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+       sc->offchannel.chan.rxfilter = *total_flags;
+#endif
        spin_unlock_bh(&sc->chan_lock);
 
        ath9k_ps_wakeup(sc);
@@ -2246,7 +2251,7 @@ static void ath9k_cancel_pending_offchannel(struct ath_softc *sc)
 
                del_timer_sync(&sc->offchannel.timer);
                if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
-                       ath_roc_complete(sc, true);
+                       ath_roc_complete(sc, ATH_ROC_COMPLETE_ABORT);
        }
 
        if (test_bit(ATH_OP_SCANNING, &common->op_flags)) {
@@ -2355,7 +2360,7 @@ static int ath9k_cancel_remain_on_channel(struct ieee80211_hw *hw)
 
        if (sc->offchannel.roc_vif) {
                if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
-                       ath_roc_complete(sc, true);
+                       ath_roc_complete(sc, ATH_ROC_COMPLETE_CANCEL);
        }
 
        mutex_unlock(&sc->mutex);
index 6c75fb1ab77d45ba8b6dab67a7fe6bb3b1f3dc98..d3189daf99965e6059d1ce407c99600a822ed69f 100644 (file)
@@ -491,10 +491,9 @@ bool ath_stoprecv(struct ath_softc *sc)
 
        if (!(ah->ah_flags & AH_UNPLUGGED) &&
            unlikely(!stopped)) {
-               ath_err(ath9k_hw_common(sc->sc_ah),
-                       "Could not stop RX, we could be "
-                       "confusing the DMA engine when we start RX up\n");
-               ATH_DBG_WARN_ON_ONCE(!stopped);
+               ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
+                       "Failed to stop Rx DMA\n");
+               RESET_STAT_INC(sc, RESET_RX_DMA_ERROR);
        }
        return stopped && !reset;
 }
index ca533b4321bddc9ee7621ca4e3dbbe5f03377236..9c16e2a6d185e31c1391e6d4aa4f14c2c9013cd4 100644 (file)
@@ -299,7 +299,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
                       sizeof(struct wmi_cmd_hdr);
        struct sk_buff *skb;
        u8 *data;
-       int time_left, ret = 0;
+       unsigned long time_left;
+       int ret = 0;
 
        if (ah->ah_flags & AH_UNPLUGGED)
                return 0;
index 3ad79bb4f2c21c94b6c41c526a7e033e0937ed77..3e3dac3d70604fcb83b71f80fa3e19412a0cdbc3 100644 (file)
@@ -106,7 +106,6 @@ void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
 static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
                             struct ath_atx_tid *tid)
 {
-       struct ath_atx_ac *ac = tid->ac;
        struct list_head *list;
        struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
        struct ath_chanctx *ctx = avp->chanctx;
@@ -114,19 +113,9 @@ static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
        if (!ctx)
                return;
 
-       if (tid->sched)
-               return;
-
-       tid->sched = true;
-       list_add_tail(&tid->list, &ac->tid_q);
-
-       if (ac->sched)
-               return;
-
-       ac->sched = true;
-
        list = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
-       list_add_tail(&ac->list, list);
+       if (list_empty(&tid->list))
+               list_add_tail(&tid->list, list);
 }
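
The per-AC scheduling struct is gone in this series (see the ath9k.h hunk above): a TID now counts as scheduled exactly when its list node is linked into the per-AC queue. That is why the code initializes the node with INIT_LIST_HEAD() and removes it with list_del_init() rather than plain list_del(): only a self-linked node reports list_empty(). A userspace miniature of the trick, with the kernel list primitives re-implemented just for this sketch:

#include <stdio.h>

/* Minimal re-implementation of the <linux/list.h> primitives used here. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}
static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);	/* self-link so list_empty() stays meaningful */
}

int main(void)
{
	struct list_head acq, tid;

	INIT_LIST_HEAD(&acq);
	INIT_LIST_HEAD(&tid);

	printf("sched=%d\n", !list_empty(&tid));	/* 0: not queued */
	list_add_tail(&tid, &acq);			/* schedule */
	printf("sched=%d\n", !list_empty(&tid));	/* 1 */
	list_del_init(&tid);				/* unschedule */
	printf("sched=%d\n", !list_empty(&tid));	/* 0 again */
	return 0;
}
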
 
 static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
@@ -208,7 +197,7 @@ static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
 static void
 ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-       struct ath_txq *txq = tid->ac->txq;
+       struct ath_txq *txq = tid->txq;
        struct ieee80211_tx_info *tx_info;
        struct sk_buff *skb, *tskb;
        struct ath_buf *bf;
@@ -237,7 +226,7 @@ ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
 
 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-       struct ath_txq *txq = tid->ac->txq;
+       struct ath_txq *txq = tid->txq;
        struct sk_buff *skb;
        struct ath_buf *bf;
        struct list_head bf_head;
@@ -644,7 +633,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
                        ath_tx_queue_tid(sc, txq, tid);
 
                        if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
-                               tid->ac->clear_ps_filter = true;
+                               tid->clear_ps_filter = true;
                }
        }
 
@@ -734,7 +723,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
        struct ieee80211_tx_rate *rates;
        u32 max_4ms_framelen, frmlen;
        u16 aggr_limit, bt_aggr_limit, legacy = 0;
-       int q = tid->ac->txq->mac80211_qnum;
+       int q = tid->txq->mac80211_qnum;
        int i;
 
        skb = bf->bf_mpdu;
@@ -1471,8 +1460,8 @@ static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
        if (list_empty(&bf_q))
                return false;
 
-       if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) {
-               tid->ac->clear_ps_filter = false;
+       if (tid->clear_ps_filter || tid->an->no_ps_filter) {
+               tid->clear_ps_filter = false;
                tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
        }
 
@@ -1491,7 +1480,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
 
        an = (struct ath_node *)sta->drv_priv;
        txtid = ATH_AN_2_TID(an, tid);
-       txq = txtid->ac->txq;
+       txq = txtid->txq;
 
        ath_txq_lock(sc, txq);
 
@@ -1525,7 +1514,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
 {
        struct ath_node *an = (struct ath_node *)sta->drv_priv;
        struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
-       struct ath_txq *txq = txtid->ac->txq;
+       struct ath_txq *txq = txtid->txq;
 
        ath_txq_lock(sc, txq);
        txtid->active = false;
@@ -1538,7 +1527,6 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
                       struct ath_node *an)
 {
        struct ath_atx_tid *tid;
-       struct ath_atx_ac *ac;
        struct ath_txq *txq;
        bool buffered;
        int tidno;
@@ -1546,25 +1534,18 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
        for (tidno = 0, tid = &an->tid[tidno];
             tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
 
-               ac = tid->ac;
-               txq = ac->txq;
+               txq = tid->txq;
 
                ath_txq_lock(sc, txq);
 
-               if (!tid->sched) {
+               if (list_empty(&tid->list)) {
                        ath_txq_unlock(sc, txq);
                        continue;
                }
 
                buffered = ath_tid_has_buffered(tid);
 
-               tid->sched = false;
-               list_del(&tid->list);
-
-               if (ac->sched) {
-                       ac->sched = false;
-                       list_del(&ac->list);
-               }
+               list_del_init(&tid->list);
 
                ath_txq_unlock(sc, txq);
 
@@ -1575,18 +1556,16 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
 void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
 {
        struct ath_atx_tid *tid;
-       struct ath_atx_ac *ac;
        struct ath_txq *txq;
        int tidno;
 
        for (tidno = 0, tid = &an->tid[tidno];
             tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
 
-               ac = tid->ac;
-               txq = ac->txq;
+               txq = tid->txq;
 
                ath_txq_lock(sc, txq);
-               ac->clear_ps_filter = true;
+               tid->clear_ps_filter = true;
 
                if (ath_tid_has_buffered(tid)) {
                        ath_tx_queue_tid(sc, txq, tid);
@@ -1606,7 +1585,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
 
        an = (struct ath_node *)sta->drv_priv;
        tid = ATH_AN_2_TID(an, tidno);
-       txq = tid->ac->txq;
+       txq = tid->txq;
 
        ath_txq_lock(sc, txq);
 
@@ -1645,7 +1624,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
 
                tid = ATH_AN_2_TID(an, i);
 
-               ath_txq_lock(sc, tid->ac->txq);
+               ath_txq_lock(sc, tid->txq);
                while (nframes > 0) {
                        bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
                        if (!bf)
@@ -1669,7 +1648,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
                        if (an->sta && !ath_tid_has_buffered(tid))
                                ieee80211_sta_set_buffered(an->sta, i, false);
                }
-               ath_txq_unlock_complete(sc, tid->ac->txq);
+               ath_txq_unlock_complete(sc, tid->txq);
        }
 
        if (list_empty(&bf_q))
@@ -1883,8 +1862,11 @@ bool ath_drain_all_txq(struct ath_softc *sc)
                        npend |= BIT(i);
        }
 
-       if (npend)
-               ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
+       if (npend) {
+               RESET_STAT_INC(sc, RESET_TX_DMA_ERROR);
+               ath_dbg(common, RESET,
+                       "Failed to stop TX DMA, queues=0x%03x!\n", npend);
+       }
 
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (!ATH_TXQ_SETUP(sc, i))
@@ -1915,9 +1897,8 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ath_atx_ac *ac, *last_ac;
        struct ath_atx_tid *tid, *last_tid;
-       struct list_head *ac_list;
+       struct list_head *tid_list;
        bool sent = false;
 
        if (txq->mac80211_qnum < 0)
@@ -1927,63 +1908,45 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
                return;
 
        spin_lock_bh(&sc->chan_lock);
-       ac_list = &sc->cur_chan->acq[txq->mac80211_qnum];
+       tid_list = &sc->cur_chan->acq[txq->mac80211_qnum];
 
-       if (list_empty(ac_list)) {
+       if (list_empty(tid_list)) {
                spin_unlock_bh(&sc->chan_lock);
                return;
        }
 
        rcu_read_lock();
 
-       last_ac = list_entry(ac_list->prev, struct ath_atx_ac, list);
-       while (!list_empty(ac_list)) {
+       last_tid = list_entry(tid_list->prev, struct ath_atx_tid, list);
+       while (!list_empty(tid_list)) {
                bool stop = false;
 
                if (sc->cur_chan->stopped)
                        break;
 
-               ac = list_first_entry(ac_list, struct ath_atx_ac, list);
-               last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
-               list_del(&ac->list);
-               ac->sched = false;
-
-               while (!list_empty(&ac->tid_q)) {
-
-                       tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
-                                              list);
-                       list_del(&tid->list);
-                       tid->sched = false;
-
-                       if (ath_tx_sched_aggr(sc, txq, tid, &stop))
-                               sent = true;
-
-                       /*
-                        * add tid to round-robin queue if more frames
-                        * are pending for the tid
-                        */
-                       if (ath_tid_has_buffered(tid))
-                               ath_tx_queue_tid(sc, txq, tid);
+               tid = list_first_entry(tid_list, struct ath_atx_tid, list);
+               list_del_init(&tid->list);
 
-                       if (stop || tid == last_tid)
-                               break;
-               }
+               if (ath_tx_sched_aggr(sc, txq, tid, &stop))
+                       sent = true;
 
-               if (!list_empty(&ac->tid_q) && !ac->sched) {
-                       ac->sched = true;
-                       list_add_tail(&ac->list, ac_list);
-               }
+               /*
+                * add tid to round-robin queue if more frames
+                * are pending for the tid
+                */
+               if (ath_tid_has_buffered(tid))
+                       ath_tx_queue_tid(sc, txq, tid);
 
                if (stop)
                        break;
 
-               if (ac == last_ac) {
+               if (tid == last_tid) {
                        if (!sent)
                                break;
 
                        sent = false;
-                       last_ac = list_entry(ac_list->prev,
-                                            struct ath_atx_ac, list);
+                       last_tid = list_entry(tid_list->prev,
+                                             struct ath_atx_tid, list);
                }
        }
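
The rewritten ath_txq_schedule() collapses the old two-level walk (ACs, each holding a queue of TIDs) into a single round-robin over TIDs: pop the head, let it transmit, and requeue it at the tail if it still has buffered frames, with last_tid/sent guarding against a fruitless full pass. An array-based caricature of that rotation (frame counts invented, and the last_tid/sent bail-out omitted for brevity):

#include <stdio.h>

int main(void)
{
	int pending[3] = { 2, 0, 1 };		/* invented frames per TID */
	int queue[8] = { 0, 1, 2 }, head = 0, tail = 3;

	while (head != tail) {
		int tid = queue[head++ % 8];	/* pop the head TID */

		if (pending[tid]) {
			pending[tid]--;		/* "sent one aggregate" */
			printf("serviced TID %d\n", tid);
			if (pending[tid])
				queue[tail++ % 8] = tid; /* requeue at tail */
		}
	}
	return 0;
}
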
 
@@ -2373,10 +2336,10 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
                txq = sc->tx.uapsdq;
                ath_txq_lock(sc, txq);
        } else if (txctl->an && queue) {
-               WARN_ON(tid->ac->txq != txctl->txq);
+               WARN_ON(tid->txq != txctl->txq);
 
                if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
-                       tid->ac->clear_ps_filter = true;
+                       tid->clear_ps_filter = true;
 
                /*
                 * Add this frame to software queue for scheduling later
@@ -2470,8 +2433,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        bf = list_first_entry(&bf_q, struct ath_buf, list);
        hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
 
-       if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) {
-               hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA;
+       if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) {
+               hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
                dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
                        sizeof(*hdr), DMA_TO_DEVICE);
        }
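
The MOREDATA change above is an endianness fix: frame_control is a little-endian (__le16) field, so testing or clearing it against the host-order IEEE80211_FCTL_MOREDATA constant only happens to work on little-endian CPUs; wrapping the constant in cpu_to_le16() makes the mask correct everywhere. A userspace simulation of what a big-endian host would see:

#include <stdint.h>
#include <stdio.h>

#define IEEE80211_FCTL_MOREDATA 0x2000	/* host-order constant */

/* Emulate the two ways of loading a __le16 field. */
static uint16_t be_load(const uint8_t *p)  { return (uint16_t)(p[0] << 8 | p[1]); }
static uint16_t le_load(const uint8_t *p)  { return (uint16_t)(p[1] << 8 | p[0]); }

int main(void)
{
	/* frame_control with MOREDATA set, as stored in the frame
	 * (little-endian, low byte first): 0x2000 -> 0x00 0x20 */
	uint8_t fc[2] = { 0x00, 0x20 };

	printf("naive big-endian read: %d\n",
	       !!(be_load(fc) & IEEE80211_FCTL_MOREDATA));	/* 0: bit missed */
	printf("little-endian read:    %d\n",
	       !!(le_load(fc) & IEEE80211_FCTL_MOREDATA));	/* 1: correct */
	return 0;
}
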
@@ -2870,7 +2833,6 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
 void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
 {
        struct ath_atx_tid *tid;
-       struct ath_atx_ac *ac;
        int tidno, acno;
 
        for (tidno = 0, tid = &an->tid[tidno];
@@ -2881,26 +2843,18 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
                tid->seq_start = tid->seq_next = 0;
                tid->baw_size  = WME_MAX_BA;
                tid->baw_head  = tid->baw_tail = 0;
-               tid->sched     = false;
                tid->active        = false;
+               tid->clear_ps_filter = true;
                __skb_queue_head_init(&tid->buf_q);
                __skb_queue_head_init(&tid->retry_q);
+               INIT_LIST_HEAD(&tid->list);
                acno = TID_TO_WME_AC(tidno);
-               tid->ac = &an->ac[acno];
-       }
-
-       for (acno = 0, ac = &an->ac[acno];
-            acno < IEEE80211_NUM_ACS; acno++, ac++) {
-               ac->sched    = false;
-               ac->clear_ps_filter = true;
-               ac->txq = sc->tx.txq_map[acno];
-               INIT_LIST_HEAD(&ac->tid_q);
+               tid->txq = sc->tx.txq_map[acno];
        }
 }
 
 void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
 {
-       struct ath_atx_ac *ac;
        struct ath_atx_tid *tid;
        struct ath_txq *txq;
        int tidno;
@@ -2908,20 +2862,12 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
        for (tidno = 0, tid = &an->tid[tidno];
             tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
 
-               ac = tid->ac;
-               txq = ac->txq;
+               txq = tid->txq;
 
                ath_txq_lock(sc, txq);
 
-               if (tid->sched) {
-                       list_del(&tid->list);
-                       tid->sched = false;
-               }
-
-               if (ac->sched) {
-                       list_del(&ac->list);
-                       tid->ac->sched = false;
-               }
+               if (!list_empty(&tid->list))
+                       list_del_init(&tid->list);
 
                ath_tid_drain(sc, txq, tid);
                tid->active = false;
index 508eccf5d982c8d821edec4fc4b8a578bf2e01e2..d59d83e0ce4b9588e5169b2b28121d82935d187b 100644 (file)
@@ -40,6 +40,8 @@ const char *ath_opmode_to_string(enum nl80211_iftype opmode)
                return "P2P-CLIENT";
        case NL80211_IFTYPE_P2P_GO:
                return "P2P-GO";
+       case NL80211_IFTYPE_OCB:
+               return "OCB";
        default:
                return "UNKNOWN";
        }
index 1b5ad1965607cd287ca211d2232da1cdcc8d73fc..cc5c592fc4c007cb7fd0fc8c6c8d05427b274123 100644 (file)
@@ -273,7 +273,7 @@ static bool pseq_handler_create_sequences(struct pri_detector *pde,
                                tmp_false_count++;
                        }
                }
-               if (ps.count < min_count)
+               if (ps.count <= min_count)
                        /* did not reach minimum count, drop sequence */
                        continue;
 
index 050506f842e9a63298920af2f5b8700e80aaf87b..64b432625fbbdb5ad5c33d9920065937ac2f5572 100644 (file)
@@ -12,6 +12,7 @@ wil6210-y += debug.o
 wil6210-y += rx_reorder.o
 wil6210-y += ioctl.o
 wil6210-y += fw.o
+wil6210-y += pm.o
 wil6210-y += pmc.o
 wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
 wil6210-y += wil_platform.o
diff --git a/drivers/net/wireless/ath/wil6210/boot_loader.h b/drivers/net/wireless/ath/wil6210/boot_loader.h
new file mode 100644 (file)
index 0000000..c131b5e
--- /dev/null
@@ -0,0 +1,61 @@
+/* Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* This file contains the definitions for the boot loader
+ * for the Qualcomm "Sparrow" 60 Gigabit wireless solution.
+ */
+#ifndef BOOT_LOADER_EXPORT_H_
+#define BOOT_LOADER_EXPORT_H_
+
+struct bl_dedicated_registers_v1 {
+       __le32  boot_loader_ready;              /* 0x880A3C driver will poll
+                                                * this Dword until BL sets
+                                                * it to 1 (initial value
+                                                * should be 0)
+                                                */
+       __le32  boot_loader_struct_version;     /* 0x880A40 BL struct ver. */
+       __le16  rf_type;                        /* 0x880A44 connected RF ID */
+       __le16  rf_status;                      /* 0x880A46 RF status,
+                                                * 0 is OK else error
+                                                */
+       __le32  baseband_type;                  /* 0x880A48 board type ID */
+       u8      mac_address[6];                 /* 0x880A4c BL mac address */
+       u8      bl_version_major;               /* 0x880A52 BL ver. major */
+       u8      bl_version_minor;               /* 0x880A53 BL ver. minor */
+       __le16  bl_version_subminor;            /* 0x880A54 BL ver. subminor */
+       __le16  bl_version_build;               /* 0x880A56 BL ver. build */
+       /* valid only for version 2 and above */
+       __le32  bl_assert_code;         /* 0x880A58 BL Assert code */
+       __le32  bl_assert_blink;        /* 0x880A5C BL Assert Branch */
+       __le32  bl_reserved[22];        /* 0x880A60 - 0x880AB4 */
+       __le32  bl_magic_number;        /* 0x880AB8 BL Magic number */
+} __packed;
+
+/* the following struct is the version 0 struct */
+
+struct bl_dedicated_registers_v0 {
+       __le32  boot_loader_ready;              /* 0x880A3C driver will poll
+                                                * this Dword until BL sets
+                                                * it to 1 (initial value
+                                                * should be 0)
+                                                */
+#define BL_READY (1)   /* ready indication */
+       __le32  boot_loader_struct_version;     /* 0x880A40 BL struct ver. */
+       __le32  rf_type;                        /* 0x880A44 connected RF ID */
+       __le32  baseband_type;                  /* 0x880A48 board type ID */
+       u8      mac_address[6];                 /* 0x880A4c BL mac address */
+} __packed;
+
+#endif /* BOOT_LOADER_EXPORT_H_ */
index c79cfe02ec80a62ded454689cee393261484d5ec..20d07ef679e89d467170abae532819f36f283821 100644 (file)
@@ -336,12 +336,9 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
        else
                wil_dbg_misc(wil, "Scan has no IE's\n");
 
-       rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len,
-                       request->ie);
-       if (rc) {
-               wil_err(wil, "Aborting scan, set_ie failed: %d\n", rc);
+       rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie);
+       if (rc)
                goto out;
-       }
 
        rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
                        cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
@@ -462,10 +459,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
         * ies in FW.
         */
        rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie);
-       if (rc) {
-               wil_err(wil, "WMI_SET_APPIE_CMD failed\n");
+       if (rc)
                goto out;
-       }
 
        /* WMI_CONNECT_CMD */
        memset(&conn, 0, sizeof(conn));
@@ -722,17 +717,98 @@ static int wil_fix_bcon(struct wil6210_priv *wil,
 {
        struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
        size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
-       int rc = 0;
 
        if (bcon->probe_resp_len <= hlen)
                return 0;
 
+/* always use IEs from the full probe frame, they have more info,
+ * notably RSN
+ */
+       bcon->proberesp_ies = f->u.probe_resp.variable;
+       bcon->proberesp_ies_len = bcon->probe_resp_len - hlen;
        if (!bcon->assocresp_ies) {
-               bcon->assocresp_ies = f->u.probe_resp.variable;
-               bcon->assocresp_ies_len = bcon->probe_resp_len - hlen;
-               rc = 1;
+               bcon->assocresp_ies = bcon->proberesp_ies;
+               bcon->assocresp_ies_len = bcon->proberesp_ies_len;
        }
 
+       return 1;
+}
+
+/* internal functions for device reset and starting AP */
+static int _wil_cfg80211_set_ies(struct wiphy *wiphy,
+                                struct cfg80211_beacon_data *bcon)
+{
+       int rc;
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+       rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
+                       bcon->proberesp_ies);
+       if (rc)
+               return rc;
+
+       rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
+                       bcon->assocresp_ies);
+#if 0 /* to use beacon IE's, remove this #if 0 */
+       if (rc)
+               return rc;
+
+       rc = wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->tail_len, bcon->tail);
+#endif
+
+       return rc;
+}
+
+static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
+                                 struct net_device *ndev,
+                                 const u8 *ssid, size_t ssid_len, u32 privacy,
+                                 int bi, u8 chan,
+                                 struct cfg80211_beacon_data *bcon,
+                                 u8 hidden_ssid)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       int rc;
+       struct wireless_dev *wdev = ndev->ieee80211_ptr;
+       u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
+       wil_set_recovery_state(wil, fw_recovery_idle);
+
+       mutex_lock(&wil->mutex);
+
+       __wil_down(wil);
+       rc = __wil_up(wil);
+       if (rc)
+               goto out;
+
+       rc = wmi_set_ssid(wil, ssid_len, ssid);
+       if (rc)
+               goto out;
+
+       rc = _wil_cfg80211_set_ies(wiphy, bcon);
+       if (rc)
+               goto out;
+
+       wil->privacy = privacy;
+       wil->channel = chan;
+       wil->hidden_ssid = hidden_ssid;
+
+       netif_carrier_on(ndev);
+
+       rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid);
+       if (rc)
+               goto err_pcp_start;
+
+       rc = wil_bcast_init(wil);
+       if (rc)
+               goto err_bcast;
+
+       goto out; /* success */
+
+err_bcast:
+       wmi_pcp_stop(wil);
+err_pcp_start:
+       netif_carrier_off(ndev);
+out:
+       mutex_unlock(&wil->mutex);
        return rc;
 }
 
@@ -741,63 +817,50 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
                                      struct cfg80211_beacon_data *bcon)
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
-       struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
-       size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
-       const u8 *pr_ies = NULL;
-       size_t pr_ies_len = 0;
        int rc;
+       u32 privacy = 0;
 
        wil_dbg_misc(wil, "%s()\n", __func__);
        wil_print_bcon_data(bcon);
 
-       if (bcon->probe_resp_len > hlen) {
-               pr_ies = f->u.probe_resp.variable;
-               pr_ies_len = bcon->probe_resp_len - hlen;
-       }
-
        if (wil_fix_bcon(wil, bcon)) {
                wil_dbg_misc(wil, "Fixed bcon\n");
                wil_print_bcon_data(bcon);
        }
 
-       /* FW do not form regular beacon, so bcon IE's are not set
-        * For the DMG bcon, when it will be supported, bcon IE's will
-        * be reused; add something like:
-        * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
-        * bcon->beacon_ies);
-        */
-       rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
-       if (rc) {
-               wil_err(wil, "set_ie(PROBE_RESP) failed\n");
-               return rc;
-       }
+       if (bcon->proberesp_ies &&
+           cfg80211_find_ie(WLAN_EID_RSN, bcon->proberesp_ies,
+                            bcon->proberesp_ies_len))
+               privacy = 1;
 
-       rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP,
-                       bcon->assocresp_ies_len,
-                       bcon->assocresp_ies);
-       if (rc) {
-               wil_err(wil, "set_ie(ASSOC_RESP) failed\n");
-               return rc;
+       /* in case privacy has changed, need to restart the AP */
+       if (wil->privacy != privacy) {
+               struct wireless_dev *wdev = ndev->ieee80211_ptr;
+
+               wil_dbg_misc(wil, "privacy changed %d=>%d. Restarting AP\n",
+                            wil->privacy, privacy);
+
+               rc = _wil_cfg80211_start_ap(wiphy, ndev, wdev->ssid,
+                                           wdev->ssid_len, privacy,
+                                           wdev->beacon_interval,
+                                           wil->channel, bcon,
+                                           wil->hidden_ssid);
+       } else {
+               rc = _wil_cfg80211_set_ies(wiphy, bcon);
        }
 
-       return 0;
+       return rc;
 }
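
wil_cfg80211_change_beacon() now infers the privacy setting from whether an RSN information element is present in the probe-response IEs, and restarts the AP when that differs from the current setting. For readers who have not met the IE wire format: information elements are (id, length, value) triplets and WLAN_EID_RSN is 48. The find_ie() below is a userspace stand-in for cfg80211_find_ie(), written only for this sketch:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define WLAN_EID_RSN 48

/* Walk a buffer of (id, len, value) triplets looking for one element. */
static const uint8_t *find_ie(uint8_t eid, const uint8_t *ies, size_t len)
{
	while (len >= 2 && (size_t)(ies[1] + 2) <= len) {
		if (ies[0] == eid)
			return ies;
		len -= ies[1] + 2;
		ies += ies[1] + 2;
	}
	return NULL;
}

int main(void)
{
	/* an SSID IE followed by a truncated, purely illustrative RSN IE */
	const uint8_t ies[] = { 0, 4, 't', 'e', 's', 't', 48, 2, 0x01, 0x00 };

	printf("privacy=%d\n", find_ie(WLAN_EID_RSN, ies, sizeof(ies)) != NULL);
	return 0;
}
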
 
 static int wil_cfg80211_start_ap(struct wiphy *wiphy,
                                 struct net_device *ndev,
                                 struct cfg80211_ap_settings *info)
 {
-       int rc = 0;
+       int rc;
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
-       struct wireless_dev *wdev = ndev->ieee80211_ptr;
        struct ieee80211_channel *channel = info->chandef.chan;
        struct cfg80211_beacon_data *bcon = &info->beacon;
        struct cfg80211_crypto_settings *crypto = &info->crypto;
-       u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
-       struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
-       size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
-       const u8 *pr_ies = NULL;
-       size_t pr_ies_len = 0;
        u8 hidden_ssid;
 
        wil_dbg_misc(wil, "%s()\n", __func__);
@@ -807,6 +870,23 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
                return -EINVAL;
        }
 
+       switch (info->hidden_ssid) {
+       case NL80211_HIDDEN_SSID_NOT_IN_USE:
+               hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
+               break;
+
+       case NL80211_HIDDEN_SSID_ZERO_LEN:
+               hidden_ssid = WMI_HIDDEN_SSID_SEND_EMPTY;
+               break;
+
+       case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
+               hidden_ssid = WMI_HIDDEN_SSID_CLEAR;
+               break;
+
+       default:
+               wil_err(wil, "AP: Invalid hidden SSID %d\n", info->hidden_ssid);
+               return -EOPNOTSUPP;
+       }
        wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
                     channel->center_freq, info->privacy ? "secure" : "open");
        wil_dbg_misc(wil, "Privacy: %d auth_type %d\n",
@@ -820,80 +900,16 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
        wil_print_bcon_data(bcon);
        wil_print_crypto(wil, crypto);
 
-       if (bcon->probe_resp_len > hlen) {
-               pr_ies = f->u.probe_resp.variable;
-               pr_ies_len = bcon->probe_resp_len - hlen;
-       }
-
        if (wil_fix_bcon(wil, bcon)) {
                wil_dbg_misc(wil, "Fixed bcon\n");
                wil_print_bcon_data(bcon);
        }
 
-       wil_set_recovery_state(wil, fw_recovery_idle);
-
-       mutex_lock(&wil->mutex);
-
-       __wil_down(wil);
-       rc = __wil_up(wil);
-       if (rc)
-               goto out;
-
-       rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
-       if (rc)
-               goto out;
-
-       /* IE's */
-       /* bcon 'head IE's are not relevant for 60g band */
-       /*
-        * FW do not form regular beacon, so bcon IE's are not set
-        * For the DMG bcon, when it will be supported, bcon IE's will
-        * be reused; add something like:
-        * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
-        * bcon->beacon_ies);
-        */
-       wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
-       wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
-                  bcon->assocresp_ies);
-
-       wil->privacy = info->privacy;
-
-       switch (info->hidden_ssid) {
-       case NL80211_HIDDEN_SSID_NOT_IN_USE:
-               hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
-               break;
-
-       case NL80211_HIDDEN_SSID_ZERO_LEN:
-               hidden_ssid = WMI_HIDDEN_SSID_SEND_EMPTY;
-               break;
-
-       case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
-               hidden_ssid = WMI_HIDDEN_SSID_CLEAR;
-               break;
-
-       default:
-               rc = -EOPNOTSUPP;
-               goto out;
-       }
-
-       netif_carrier_on(ndev);
-
-       rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
-                          channel->hw_value, hidden_ssid);
-       if (rc)
-               goto err_pcp_start;
+       rc = _wil_cfg80211_start_ap(wiphy, ndev,
+                                   info->ssid, info->ssid_len, info->privacy,
+                                   info->beacon_interval, channel->hw_value,
+                                   bcon, hidden_ssid);
 
-       rc = wil_bcast_init(wil);
-       if (rc)
-               goto err_bcast;
-
-       goto out; /* success */
-err_bcast:
-       wmi_pcp_stop(wil);
-err_pcp_start:
-       netif_carrier_off(ndev);
-out:
-       mutex_unlock(&wil->mutex);
        return rc;
 }
 
index 75219a1b8805135c5cc751db9f70c50e55ddf285..613ca2b2527be25a0c4a51329acdab30bb0e3ab3 100644 (file)
@@ -62,7 +62,7 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
        seq_printf(s, "  swhead = %d\n", vring->swhead);
        seq_printf(s, "  hwtail = [0x%08x] -> ", vring->hwtail);
        if (x) {
-               v = ioread32(x);
+               v = readl(x);
                seq_printf(s, "0x%08x = %d\n", v, v);
        } else {
                seq_puts(s, "???\n");
@@ -268,7 +268,7 @@ static const struct file_operations fops_mbox = {
 
 static int wil_debugfs_iomem_x32_set(void *data, u64 val)
 {
-       iowrite32(val, (void __iomem *)data);
+       writel(val, (void __iomem *)data);
        wmb(); /* make sure write propagated to HW */
 
        return 0;
@@ -276,7 +276,7 @@ static int wil_debugfs_iomem_x32_set(void *data, u64 val)
 
 static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
 {
-       *val = ioread32((void __iomem *)data);
+       *val = readl((void __iomem *)data);
 
        return 0;
 }
@@ -306,7 +306,7 @@ static int wil_debugfs_ulong_get(void *data, u64 *val)
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get,
-                       wil_debugfs_ulong_set, "%llu\n");
+                       wil_debugfs_ulong_set, "0x%llx\n");
 
 static struct dentry *wil_debugfs_create_ulong(const char *name, umode_t mode,
                                               struct dentry *parent,
@@ -477,7 +477,7 @@ static int wil_memread_debugfs_show(struct seq_file *s, void *data)
        void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr));
 
        if (a)
-               seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, ioread32(a));
+               seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, readl(a));
        else
                seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
 
@@ -1344,6 +1344,7 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
 {
        int i;
        u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size;
+       unsigned long long drop_dup = r->drop_dup, drop_old = r->drop_old;
 
        seq_printf(s, "([%2d] %3d TU) 0x%03x [", r->buf_size, r->timeout,
                   r->head_seq_num);
@@ -1353,7 +1354,10 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
                else
                        seq_printf(s, "%c", r->reorder_buf[i] ? '*' : '_');
        }
-       seq_printf(s, "] last drop 0x%03x\n", r->ssn_last_drop);
+       seq_printf(s,
+                  "] total %llu drop %llu (dup %llu + old %llu) last 0x%03x\n",
+                  r->total, drop_dup + drop_old, drop_dup, drop_old,
+                  r->ssn_last_drop);
 }
 
 static int wil_sta_debugfs_show(struct seq_file *s, void *data)
index 0ea695ff98adeda1185382bfda7b58958b579275..7053b62ca8d313ac593143bb4619ed13db252de7 100644 (file)
@@ -50,19 +50,13 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
 
        wil_dbg_misc(wil, "%s()\n", __func__);
 
-       tx_itr_en = ioread32(wil->csr +
-                            HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL));
+       tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
        if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
-               tx_itr_val =
-                       ioread32(wil->csr +
-                                HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH));
+               tx_itr_val = wil_r(wil, RGF_DMA_ITR_TX_CNT_TRSH);
 
-       rx_itr_en = ioread32(wil->csr +
-                            HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL));
+       rx_itr_en = wil_r(wil, RGF_DMA_ITR_RX_CNT_CTL);
        if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
-               rx_itr_val =
-                       ioread32(wil->csr +
-                                HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH));
+               rx_itr_val = wil_r(wil, RGF_DMA_ITR_RX_CNT_TRSH);
 
        cp->tx_coalesce_usecs = tx_itr_val;
        cp->rx_coalesce_usecs = rx_itr_val;
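
wil_r() and wil_w() are the accessors this series converts the driver to (the old R/W/S/C macros are deleted from main.c just below). Their definitions are not part of this hunk; presumably they are thin readl()/writel() wrappers over the HOSTADDR() translation, roughly as sketched here, so treat the bodies as an assumption rather than the authoritative source:

/* Assumed shape of the new accessors (sketch, relies on driver types). */
static inline u32 wil_r(struct wil6210_priv *wil, u32 reg)
{
	return readl(wil->csr + HOSTADDR(reg));
}

static inline void wil_w(struct wil6210_priv *wil, u32 reg, u32 val)
{
	writel(val, wil->csr + HOSTADDR(reg));
}
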
index 4428345e5a470360560ceb82772349cf8f754a7f..82aae2d705b41803fee7b44ab174be875b159f96 100644 (file)
 MODULE_FIRMWARE(WIL_FW_NAME);
 MODULE_FIRMWARE(WIL_FW2_NAME);
 
-/* target operations */
-/* register read */
-#define R(a) ioread32(wil->csr + HOSTADDR(a))
-/* register write. wmb() to make sure it is completed */
-#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
-/* register set = read, OR, write */
-#define S(a, v) W(a, R(a) | v)
-/* register clear = read, AND with inverted, write */
-#define C(a, v) W(a, R(a) & ~v)
-
 static
 void wil_memset_toio_32(volatile void __iomem *dst, u32 val,
                        size_t count)
index 157f5ef384e0cc2804229044f369ac813c69a28d..d30657ee7e83fa887eb54340fed64bc45db3e606 100644 (file)
@@ -221,12 +221,12 @@ static int fw_handle_direct_write(struct wil6210_priv *wil, const void *data,
 
                FW_ADDR_CHECK(dst, block[i].addr, "address");
 
-               x = ioread32(dst);
+               x = readl(dst);
                y = (x & m) | (v & ~m);
                wil_dbg_fw(wil, "write [0x%08x] <== 0x%08x "
                           "(old 0x%08x val 0x%08x mask 0x%08x)\n",
                           le32_to_cpu(block[i].addr), y, x, v, m);
-               iowrite32(y, dst);
+               writel(y, dst);
                wmb(); /* finish before processing next record */
        }
 
@@ -239,18 +239,18 @@ static int gw_write(struct wil6210_priv *wil, void __iomem *gwa_addr,
 {
        unsigned delay = 0;
 
-       iowrite32(a, gwa_addr);
-       iowrite32(gw_cmd, gwa_cmd);
+       writel(a, gwa_addr);
+       writel(gw_cmd, gwa_cmd);
        wmb(); /* finish before activate gw */
 
-       iowrite32(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */
+       writel(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */
        do {
                udelay(1); /* typical time is few usec */
                if (delay++ > 100) {
                        wil_err_fw(wil, "gw timeout\n");
                        return -EINVAL;
                }
-       } while (ioread32(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */
+       } while (readl(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */
 
        return 0;
 }
@@ -305,7 +305,7 @@ static int fw_handle_gateway_data(struct wil6210_priv *wil, const void *data,
                wil_dbg_fw(wil, "  gw write[%3d] [0x%08x] <== 0x%08x\n",
                           i, a, v);
 
-               iowrite32(v, gwa_val);
+               writel(v, gwa_val);
                rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a);
                if (rc)
                        return rc;
@@ -372,7 +372,7 @@ static int fw_handle_gateway_data4(struct wil6210_priv *wil, const void *data,
                                sizeof(v), false);
 
                for (k = 0; k < ARRAY_SIZE(block->value); k++)
-                       iowrite32(v[k], gwa_val[k]);
+                       writel(v[k], gwa_val[k]);
                rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a);
                if (rc)
                        return rc;
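
Two patterns in this file are worth pinning down. fw_handle_direct_write merges values under a mask: bits set in m are preserved from the old register value, the rest are taken from v. gw_write then busy-polls the gateway control register with a bounded delay loop. Both patterns, modeled in plain C with illustrative constants:

#include <stdint.h>
#include <stdio.h>

static uint32_t merge_masked(uint32_t old, uint32_t v, uint32_t m)
{
	/* same expression as fw_handle_direct_write above */
	return (old & m) | (v & ~m);
}

#define GW_CTL_BUSY (1u << 0) /* illustrative bit */

static uint32_t gwa_ctl = GW_CTL_BUSY;

static int gw_wait_done(void)
{
	unsigned delay = 0;

	do {
		/* the driver does udelay(1) here; typical time is a few usec */
		if (delay++ > 100)
			return -1; /* gw timeout */
		if (delay == 5)
			gwa_ctl &= ~GW_CTL_BUSY; /* model the HW finishing */
	} while (gwa_ctl & GW_CTL_BUSY);
	return 0;
}

int main(void)
{
	printf("merged = 0x%08x\n",
	       merge_masked(0xffff0000, 0x1234abcd, 0xffff0000)); /* 0xffffabcd */
	printf("gw done: %d\n", gw_wait_done());
	return 0;
}
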
index 28ffc18466c4b1e1d2887f1887fc3decf36e73e2..a371f036d0546388c3bcfa6317e5d9e4ca6857f8 100644 (file)
@@ -61,13 +61,13 @@ static inline void wil_icr_clear(u32 x, void __iomem *addr)
 
 static inline void wil_icr_clear(u32 x, void __iomem *addr)
 {
-       iowrite32(x, addr);
+       writel(x, addr);
 }
 #endif /* defined(CONFIG_WIL6210_ISR_COR) */
 
 static inline u32 wil_ioread32_and_clear(void __iomem *addr)
 {
-       u32 x = ioread32(addr);
+       u32 x = readl(addr);
 
        wil_icr_clear(x, addr);
 
@@ -76,54 +76,47 @@ static inline u32 wil_ioread32_and_clear(void __iomem *addr)
 
 static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
 {
-       iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
-                 HOSTADDR(RGF_DMA_EP_TX_ICR) +
-                 offsetof(struct RGF_ICR, IMS));
+       wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMS),
+             WIL6210_IRQ_DISABLE);
 }
 
 static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
 {
-       iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
-                 HOSTADDR(RGF_DMA_EP_RX_ICR) +
-                 offsetof(struct RGF_ICR, IMS));
+       wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMS),
+             WIL6210_IRQ_DISABLE);
 }
 
 static void wil6210_mask_irq_misc(struct wil6210_priv *wil)
 {
-       iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
-                 HOSTADDR(RGF_DMA_EP_MISC_ICR) +
-                 offsetof(struct RGF_ICR, IMS));
+       wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
+             WIL6210_IRQ_DISABLE);
 }
 
 static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
 {
        wil_dbg_irq(wil, "%s()\n", __func__);
 
-       iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
-                 HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+       wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_DISABLE);
 
        clear_bit(wil_status_irqen, wil->status);
 }
 
 void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
 {
-       iowrite32(WIL6210_IMC_TX, wil->csr +
-                 HOSTADDR(RGF_DMA_EP_TX_ICR) +
-                 offsetof(struct RGF_ICR, IMC));
+       wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMC),
+             WIL6210_IMC_TX);
 }
 
 void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
 {
-       iowrite32(WIL6210_IMC_RX, wil->csr +
-                 HOSTADDR(RGF_DMA_EP_RX_ICR) +
-                 offsetof(struct RGF_ICR, IMC));
+       wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMC),
+             WIL6210_IMC_RX);
 }
 
 static void wil6210_unmask_irq_misc(struct wil6210_priv *wil)
 {
-       iowrite32(WIL6210_IMC_MISC, wil->csr +
-                 HOSTADDR(RGF_DMA_EP_MISC_ICR) +
-                 offsetof(struct RGF_ICR, IMC));
+       wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
+             WIL6210_IMC_MISC);
 }
 
 static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
@@ -132,8 +125,7 @@ static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
 
        set_bit(wil_status_irqen, wil->status);
 
-       iowrite32(WIL6210_IRQ_PSEUDO_MASK, wil->csr +
-                 HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+       wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_PSEUDO_MASK);
 }
 
 void wil_mask_irq(struct wil6210_priv *wil)
@@ -150,12 +142,12 @@ void wil_unmask_irq(struct wil6210_priv *wil)
 {
        wil_dbg_irq(wil, "%s()\n", __func__);
 
-       iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
-                 offsetof(struct RGF_ICR, ICC));
-       iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
-                 offsetof(struct RGF_ICR, ICC));
-       iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
-                 offsetof(struct RGF_ICR, ICC));
+       wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC),
+             WIL_ICR_ICC_VALUE);
+       wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC),
+             WIL_ICR_ICC_VALUE);
+       wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
+             WIL_ICR_ICC_VALUE);
 
        wil6210_unmask_irq_pseudo(wil);
        wil6210_unmask_irq_tx(wil);
@@ -163,9 +155,6 @@ void wil_unmask_irq(struct wil6210_priv *wil)
        wil6210_unmask_irq_misc(wil);
 }
 
-/* target write operation */
-#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
-
 void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
 {
        wil_dbg_irq(wil, "%s()\n", __func__);
@@ -177,44 +166,42 @@ void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
                return;
 
        /* Disable and clear tx counter before (re)configuration */
-       W(RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
-       W(RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
+       wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
+       wil_w(wil, RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
        wil_info(wil, "set ITR_TX_CNT_TRSH = %d usec\n",
                 wil->tx_max_burst_duration);
        /* Configure TX max burst duration timer to use usec units */
-       W(RGF_DMA_ITR_TX_CNT_CTL,
-         BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL);
+       wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL,
+             BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL);
 
        /* Disable and clear tx idle counter before (re)configuration */
-       W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR);
-       W(RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout);
+       wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR);
+       wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout);
        wil_info(wil, "set ITR_TX_IDL_CNT_TRSH = %d usec\n",
                 wil->tx_interframe_timeout);
        /* Configure TX interframe timeout timer to use usec units */
-       W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN |
-                                     BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL);
+       wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN |
+             BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL);
 
        /* Disable and clear rx counter before (re)configuration */
-       W(RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR);
-       W(RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration);
+       wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR);
+       wil_w(wil, RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration);
        wil_info(wil, "set ITR_RX_CNT_TRSH = %d usec\n",
                 wil->rx_max_burst_duration);
        /* Configure RX max burst duration timer to use usec units */
-       W(RGF_DMA_ITR_RX_CNT_CTL,
-         BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL);
+       wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL,
+             BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL);
 
        /* Disable and clear rx idle counter before (re)configuration */
-       W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR);
-       W(RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout);
+       wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR);
+       wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout);
        wil_info(wil, "set ITR_RX_IDL_CNT_TRSH = %d usec\n",
                 wil->rx_interframe_timeout);
        /* Configure RX interframe timeout timer to use usec units */
-       W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN |
-                                     BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
+       wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN |
+             BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
 }
 
-#undef W
-
 static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
 {
        struct wil6210_priv *wil = cookie;
@@ -452,27 +439,24 @@ static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
                u32 icr_rx = wil_ioread32_and_clear(wil->csr +
                                HOSTADDR(RGF_DMA_EP_RX_ICR) +
                                offsetof(struct RGF_ICR, ICR));
-               u32 imv_rx = ioread32(wil->csr +
-                               HOSTADDR(RGF_DMA_EP_RX_ICR) +
-                               offsetof(struct RGF_ICR, IMV));
+               u32 imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR +
+                                  offsetof(struct RGF_ICR, IMV));
                u32 icm_tx = wil_ioread32_and_clear(wil->csr +
                                HOSTADDR(RGF_DMA_EP_TX_ICR) +
                                offsetof(struct RGF_ICR, ICM));
                u32 icr_tx = wil_ioread32_and_clear(wil->csr +
                                HOSTADDR(RGF_DMA_EP_TX_ICR) +
                                offsetof(struct RGF_ICR, ICR));
-               u32 imv_tx = ioread32(wil->csr +
-                               HOSTADDR(RGF_DMA_EP_TX_ICR) +
-                               offsetof(struct RGF_ICR, IMV));
+               u32 imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR +
+                                  offsetof(struct RGF_ICR, IMV));
                u32 icm_misc = wil_ioread32_and_clear(wil->csr +
                                HOSTADDR(RGF_DMA_EP_MISC_ICR) +
                                offsetof(struct RGF_ICR, ICM));
                u32 icr_misc = wil_ioread32_and_clear(wil->csr +
                                HOSTADDR(RGF_DMA_EP_MISC_ICR) +
                                offsetof(struct RGF_ICR, ICR));
-               u32 imv_misc = ioread32(wil->csr +
-                               HOSTADDR(RGF_DMA_EP_MISC_ICR) +
-                               offsetof(struct RGF_ICR, IMV));
+               u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
+                                    offsetof(struct RGF_ICR, IMV));
                wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
                                "Rx   icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
                                "Tx   icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
@@ -492,7 +476,7 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
 {
        irqreturn_t rc = IRQ_HANDLED;
        struct wil6210_priv *wil = cookie;
-       u32 pseudo_cause = ioread32(wil->csr + HOSTADDR(RGF_DMA_PSEUDO_CAUSE));
+       u32 pseudo_cause = wil_r(wil, RGF_DMA_PSEUDO_CAUSE);
 
        /**
         * pseudo_cause is Clear-On-Read, no need to ACK
@@ -541,48 +525,12 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
        return rc;
 }
 
-static int wil6210_request_3msi(struct wil6210_priv *wil, int irq)
-{
-       int rc;
-       /*
-        * IRQ's are in the following order:
-        * - Tx
-        * - Rx
-        * - Misc
-        */
-
-       rc = request_irq(irq, wil6210_irq_tx, IRQF_SHARED,
-                        WIL_NAME"_tx", wil);
-       if (rc)
-               return rc;
-
-       rc = request_irq(irq + 1, wil6210_irq_rx, IRQF_SHARED,
-                        WIL_NAME"_rx", wil);
-       if (rc)
-               goto free0;
-
-       rc = request_threaded_irq(irq + 2, wil6210_irq_misc,
-                                 wil6210_irq_misc_thread,
-                                 IRQF_SHARED, WIL_NAME"_misc", wil);
-       if (rc)
-               goto free1;
-
-       return 0;
-       /* error branch */
-free1:
-       free_irq(irq + 1, wil);
-free0:
-       free_irq(irq, wil);
-
-       return rc;
-}
-
 /* can't use wil_ioread32_and_clear because ICC value is not set yet */
 static inline void wil_clear32(void __iomem *addr)
 {
-       u32 x = ioread32(addr);
+       u32 x = readl(addr);
 
-       iowrite32(x, addr);
+       writel(x, addr);
 }
 
 void wil6210_clear_irq(struct wil6210_priv *wil)
@@ -596,19 +544,16 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
        wmb(); /* make sure write completed */
 }
 
-int wil6210_init_irq(struct wil6210_priv *wil, int irq)
+int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
 {
        int rc;
 
-       wil_dbg_misc(wil, "%s() n_msi=%d\n", __func__, wil->n_msi);
+       wil_dbg_misc(wil, "%s(%s)\n", __func__, use_msi ? "MSI" : "INTx");
 
-       if (wil->n_msi == 3)
-               rc = wil6210_request_3msi(wil, irq);
-       else
-               rc = request_threaded_irq(irq, wil6210_hardirq,
-                                         wil6210_thread_irq,
-                                         wil->n_msi ? 0 : IRQF_SHARED,
-                                         WIL_NAME, wil);
+       rc = request_threaded_irq(irq, wil6210_hardirq,
+                                 wil6210_thread_irq,
+                                 use_msi ? 0 : IRQF_SHARED,
+                                 WIL_NAME, wil);
        return rc;
 }
 
@@ -618,8 +563,4 @@ void wil6210_fini_irq(struct wil6210_priv *wil, int irq)
 
        wil_mask_irq(wil);
        free_irq(irq, wil);
-       if (wil->n_msi == 3) {
-               free_irq(irq + 1, wil);
-               free_irq(irq + 2, wil);
-       }
 }
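
Every conversion in this file computes an ICR register address as bank base + offsetof(struct RGF_ICR, field), so the shared wil_w()/wil_r() helpers only ever see a flat offset into the CSR window. A sketch of that arithmetic; the field order below is an assumption consistent with the fields this diff touches (ICC, ICR, ICM, ICS, IMV, IMS, IMC), and the base address is illustrative:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* assumed field order, covering the fields used in this diff */
struct RGF_ICR {
	uint32_t ICC, ICR, ICM, ICS, IMV, IMS, IMC;
};

#define RGF_DMA_EP_TX_ICR 0x881bb4u /* illustrative base address */

int main(void)
{
	printf("IMS at 0x%08zx, IMC at 0x%08zx\n",
	       (size_t)RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMS),
	       (size_t)RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMC));
	return 0;
}
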
index e9c0673819c624613ce25e5612f7886b592424d8..f7f9486219516f3d8d713d0377fa574bb5eb1ec9 100644 (file)
@@ -76,11 +76,11 @@ static int wil_ioc_memio_dword(struct wil6210_priv *wil, void __user *data)
        /* operation */
        switch (io.op & wil_mmio_op_mask) {
        case wil_mmio_read:
-               io.val = ioread32(a);
+               io.val = readl(a);
                need_copy = true;
                break;
        case wil_mmio_write:
-               iowrite32(io.val, a);
+               writel(io.val, a);
                wmb(); /* make sure write propagated to HW */
                break;
        default:
index 6ca6193ab8a6100ac6257e24745d4575e17648a9..2fb04c51da53f2dd4fd58f7995eee85a1a93dcd9 100644 (file)
@@ -21,6 +21,7 @@
 #include "wil6210.h"
 #include "txrx.h"
 #include "wmi.h"
+#include "boot_loader.h"
 
 #define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000
 #define WAIT_FOR_DISCONNECT_INTERVAL_MS 10
@@ -270,8 +271,7 @@ static void wil_scan_timer_fn(ulong x)
 
        clear_bit(wil_status_fwready, wil->status);
        wil_err(wil, "Scan timeout detected, start fw error recovery\n");
-       wil->recovery_state = fw_recovery_pending;
-       schedule_work(&wil->fw_error_worker);
+       wil_fw_error_recovery(wil);
 }
 
 static int wil_wait_for_recovery(struct wil6210_priv *wil)
@@ -528,26 +528,16 @@ void wil_priv_deinit(struct wil6210_priv *wil)
        destroy_workqueue(wil->wmi_wq);
 }
 
-/* target operations */
-/* register read */
-#define R(a) ioread32(wil->csr + HOSTADDR(a))
-/* register write. wmb() to make sure it is completed */
-#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
-/* register set = read, OR, write */
-#define S(a, v) W(a, R(a) | v)
-/* register clear = read, AND with inverted, write */
-#define C(a, v) W(a, R(a) & ~v)
-
 static inline void wil_halt_cpu(struct wil6210_priv *wil)
 {
-       W(RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
-       W(RGF_USER_MAC_CPU_0,  BIT_USER_MAC_CPU_MAN_RST);
+       wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
+       wil_w(wil, RGF_USER_MAC_CPU_0,  BIT_USER_MAC_CPU_MAN_RST);
 }
 
 static inline void wil_release_cpu(struct wil6210_priv *wil)
 {
        /* Start CPU */
-       W(RGF_USER_USER_CPU_0, 1);
+       wil_w(wil, RGF_USER_USER_CPU_0, 1);
 }
 
 static int wil_target_reset(struct wil6210_priv *wil)
@@ -558,56 +548,60 @@ static int wil_target_reset(struct wil6210_priv *wil)
        wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
 
        /* Clear MAC link up */
-       S(RGF_HP_CTRL, BIT(15));
-       S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD);
-       S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
+       wil_s(wil, RGF_HP_CTRL, BIT(15));
+       wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD);
+       wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
 
        wil_halt_cpu(wil);
 
        /* clear all boot loader "ready" bits */
-       W(RGF_USER_BL + offsetof(struct RGF_BL, ready), 0);
+       wil_w(wil, RGF_USER_BL +
+             offsetof(struct bl_dedicated_registers_v0, boot_loader_ready), 0);
        /* Clear Fw Download notification */
-       C(RGF_USER_USAGE_6, BIT(0));
+       wil_c(wil, RGF_USER_USAGE_6, BIT(0));
 
-       S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
+       wil_s(wil, RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
        /* XTAL stabilization should take about 3ms */
        usleep_range(5000, 7000);
-       x = R(RGF_CAF_PLL_LOCK_STATUS);
+       x = wil_r(wil, RGF_CAF_PLL_LOCK_STATUS);
        if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) {
                wil_err(wil, "Xtal stabilization timeout\n"
                        "RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x);
                return -ETIME;
        }
        /* switch 10k to XTAL*/
-       C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
+       wil_c(wil, RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
        /* 40 MHz */
-       C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
+       wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
 
-       W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
-       W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
+       wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
+       wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
 
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
+       wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
+       wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
+       wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
+       wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
 
-       W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
-       W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
+       wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
+       wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
 
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+       wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
+       wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
+       wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
+       wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
 
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); /* reset A2 PCIE AHB */
+       wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
+       /* reset A2 PCIE AHB */
+       wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
 
-       W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+       wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
 
        /* wait until device ready. typical time is 20..80 msec */
        do {
                msleep(RST_DELAY);
-               x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready));
+               x = wil_r(wil, RGF_USER_BL +
+                         offsetof(struct bl_dedicated_registers_v0,
+                                  boot_loader_ready));
                if (x1 != x) {
                        wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x);
                        x1 = x;
@@ -617,13 +611,13 @@ static int wil_target_reset(struct wil6210_priv *wil)
                                x);
                        return -ETIME;
                }
-       } while (x != BIT_BL_READY);
+       } while (x != BL_READY);
 
-       C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+       wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
 
        /* enable fix for HW bug related to the SA/DA swap in AP Rx */
-       S(RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
-         BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
+       wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
+             BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
 
        wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
        return 0;
@@ -641,29 +635,93 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
 static int wil_get_bl_info(struct wil6210_priv *wil)
 {
        struct net_device *ndev = wil_to_ndev(wil);
-       struct RGF_BL bl;
-
-       wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL), sizeof(bl));
-       le32_to_cpus(&bl.ready);
-       le32_to_cpus(&bl.version);
-       le32_to_cpus(&bl.rf_type);
-       le32_to_cpus(&bl.baseband_type);
+       union {
+               struct bl_dedicated_registers_v0 bl0;
+               struct bl_dedicated_registers_v1 bl1;
+       } bl;
+       u32 bl_ver;
+       u8 *mac;
+       u16 rf_status;
+
+       wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL),
+                            sizeof(bl));
+       bl_ver = le32_to_cpu(bl.bl0.boot_loader_struct_version);
+       mac = bl.bl0.mac_address;
+
+       if (bl_ver == 0) {
+               le32_to_cpus(&bl.bl0.rf_type);
+               le32_to_cpus(&bl.bl0.baseband_type);
+               rf_status = 0; /* actually, unknown */
+               wil_info(wil,
+                        "Boot Loader struct v%d: MAC = %pM RF = 0x%08x bband = 0x%08x\n",
+                        bl_ver, mac,
+                        bl.bl0.rf_type, bl.bl0.baseband_type);
+               wil_info(wil, "Boot Loader build unknown for struct v0\n");
+       } else {
+               le16_to_cpus(&bl.bl1.rf_type);
+               rf_status = le16_to_cpu(bl.bl1.rf_status);
+               le32_to_cpus(&bl.bl1.baseband_type);
+               le16_to_cpus(&bl.bl1.bl_version_subminor);
+               le16_to_cpus(&bl.bl1.bl_version_build);
+               wil_info(wil,
+                        "Boot Loader struct v%d: MAC = %pM RF = 0x%04x (status 0x%04x) bband = 0x%08x\n",
+                        bl_ver, mac,
+                        bl.bl1.rf_type, rf_status,
+                        bl.bl1.baseband_type);
+               wil_info(wil, "Boot Loader build %d.%d.%d.%d\n",
+                        bl.bl1.bl_version_major, bl.bl1.bl_version_minor,
+                        bl.bl1.bl_version_subminor, bl.bl1.bl_version_build);
+       }
 
-       if (!is_valid_ether_addr(bl.mac_address)) {
-               wil_err(wil, "BL: Invalid MAC %pM\n", bl.mac_address);
+       if (!is_valid_ether_addr(mac)) {
+               wil_err(wil, "BL: Invalid MAC %pM\n", mac);
                return -EINVAL;
        }
 
-       ether_addr_copy(ndev->perm_addr, bl.mac_address);
+       ether_addr_copy(ndev->perm_addr, mac);
        if (!is_valid_ether_addr(ndev->dev_addr))
-               ether_addr_copy(ndev->dev_addr, bl.mac_address);
-       wil_info(wil,
-                "Boot Loader: ver = %d MAC = %pM RF = 0x%08x bband = 0x%08x\n",
-                bl.version, bl.mac_address, bl.rf_type, bl.baseband_type);
+               ether_addr_copy(ndev->dev_addr, mac);
+
+       if (rf_status) { /* bad RF cable? */
+               wil_err(wil, "RF communication error 0x%04x",
+                       rf_status);
+               return -EAGAIN;
+       }
 
        return 0;
 }
 
+static void wil_bl_crash_info(struct wil6210_priv *wil, bool is_err)
+{
+       u32 bl_assert_code, bl_assert_blink, bl_magic_number;
+       u32 bl_ver = wil_r(wil, RGF_USER_BL +
+                          offsetof(struct bl_dedicated_registers_v0,
+                                   boot_loader_struct_version));
+
+       if (bl_ver < 2)
+               return;
+
+       bl_assert_code = wil_r(wil, RGF_USER_BL +
+                              offsetof(struct bl_dedicated_registers_v1,
+                                       bl_assert_code));
+       bl_assert_blink = wil_r(wil, RGF_USER_BL +
+                               offsetof(struct bl_dedicated_registers_v1,
+                                        bl_assert_blink));
+       bl_magic_number = wil_r(wil, RGF_USER_BL +
+                               offsetof(struct bl_dedicated_registers_v1,
+                                        bl_magic_number));
+
+       if (is_err) {
+               wil_err(wil,
+                       "BL assert code 0x%08x blink 0x%08x magic 0x%08x\n",
+                       bl_assert_code, bl_assert_blink, bl_magic_number);
+       } else {
+               wil_dbg_misc(wil,
+                            "BL assert code 0x%08x blink 0x%08x magic 0x%08x\n",
+                            bl_assert_code, bl_assert_blink, bl_magic_number);
+       }
+}
+
 static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
 {
        ulong to = msecs_to_jiffies(1000);
@@ -690,9 +748,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 
        wil_dbg_misc(wil, "%s()\n", __func__);
 
-       if (wil->hw_version == HW_VER_UNKNOWN)
-               return -ENODEV;
-
        WARN_ON(!mutex_is_locked(&wil->mutex));
        WARN_ON(test_bit(wil_status_napi_en, wil->status));
 
@@ -707,6 +762,9 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
                return 0;
        }
 
+       if (wil->hw_version == HW_VER_UNKNOWN)
+               return -ENODEV;
+
        cancel_work_sync(&wil->disconnect_worker);
        wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
        wil_bcast_fini(wil);
@@ -729,12 +787,17 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
        flush_workqueue(wil->wq_service);
        flush_workqueue(wil->wmi_wq);
 
+       wil_bl_crash_info(wil, false);
        rc = wil_target_reset(wil);
        wil_rx_fini(wil);
-       if (rc)
+       if (rc) {
+               wil_bl_crash_info(wil, true);
                return rc;
+       }
 
        rc = wil_get_bl_info(wil);
+       if (rc == -EAGAIN && !load_fw) /* ignore RF error if not going up */
+               rc = 0;
        if (rc)
                return rc;
 
@@ -752,7 +815,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
                        return rc;
 
                /* Mark FW as loaded from host */
-               S(RGF_USER_USAGE_6, 1);
+               wil_s(wil, RGF_USER_USAGE_6, 1);
 
                /* clear any interrupts which on-card-firmware
                 * may have set
@@ -760,8 +823,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
                wil6210_clear_irq(wil);
                /* CAF_ICR - clear and mask */
                /* it is W1C, clear by writing back same value */
-               S(RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
-               W(RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
+               wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
+               wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
 
                wil_release_cpu(wil);
        }
@@ -785,11 +848,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
        return rc;
 }
 
-#undef R
-#undef W
-#undef S
-#undef C
-
 void wil_fw_error_recovery(struct wil6210_priv *wil)
 {
        wil_dbg_misc(wil, "starting fw error recovery\n");
index 8ef18ace110ffffde2e4a954c20c2a0e0632858e..e3b3c8fb4605502a8c7cf80bbf12b1473a69388c 100644 (file)
@@ -173,7 +173,10 @@ void *wil_if_alloc(struct device *dev)
        wil_set_ethtoolops(ndev);
        ndev->ieee80211_ptr = wdev;
        ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
-                           NETIF_F_SG | NETIF_F_GRO;
+                           NETIF_F_SG | NETIF_F_GRO |
+                           NETIF_F_TSO | NETIF_F_TSO6 |
+                           NETIF_F_RXHASH;
+
        ndev->features |= ndev->hw_features;
        SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
        wdev->netdev = ndev;
index aa3ecc607ca31abae2879bea058a2505006e1495..feff1ef10fb3d757fc0f00a0fde7a4e127016918 100644 (file)
 
 #include "wil6210.h"
 
-static int use_msi = 1;
-module_param(use_msi, int, S_IRUGO);
-MODULE_PARM_DESC(use_msi,
-                " Use MSI interrupt: "
-                "0 - don't, 1 - (default) - single, or 3");
+static bool use_msi = true;
+module_param(use_msi, bool, S_IRUGO);
+MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
 
 static
 void wil_set_capabilities(struct wil6210_priv *wil)
 {
-       u32 rev_id = ioread32(wil->csr + HOSTADDR(RGF_USER_JTAG_DEV_ID));
+       u32 rev_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
 
        bitmap_zero(wil->hw_capabilities, hw_capability_last);
 
@@ -50,24 +48,12 @@ void wil_set_capabilities(struct wil6210_priv *wil)
 
 void wil_disable_irq(struct wil6210_priv *wil)
 {
-       int irq = wil->pdev->irq;
-
-       disable_irq(irq);
-       if (wil->n_msi == 3) {
-               disable_irq(irq + 1);
-               disable_irq(irq + 2);
-       }
+       disable_irq(wil->pdev->irq);
 }
 
 void wil_enable_irq(struct wil6210_priv *wil)
 {
-       int irq = wil->pdev->irq;
-
-       enable_irq(irq);
-       if (wil->n_msi == 3) {
-               enable_irq(irq + 1);
-               enable_irq(irq + 2);
-       }
+       enable_irq(wil->pdev->irq);
 }
 
 /* Bus ops */
@@ -80,6 +66,7 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
         * and only MSI should be used
         */
        int msi_only = pdev->msi_enabled;
+       bool _use_msi = use_msi;
 
        wil_dbg_misc(wil, "%s()\n", __func__);
 
@@ -87,41 +74,20 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
 
        pci_set_master(pdev);
 
-       /*
-        * how many MSI interrupts to request?
-        */
-       switch (use_msi) {
-       case 3:
-       case 1:
-               wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
-               break;
-       case 0:
-               wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
-               break;
-       default:
-               wil_err(wil, "Invalid use_msi=%d, default to 1\n", use_msi);
-               use_msi = 1;
-       }
-
-       if (use_msi == 3 && pci_enable_msi_range(pdev, 3, 3) < 0) {
-               wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
-               use_msi = 1;
-       }
+       wil_dbg_misc(wil, "Setup %s interrupt\n", use_msi ? "MSI" : "INTx");
 
-       if (use_msi == 1 && pci_enable_msi(pdev)) {
+       if (use_msi && pci_enable_msi(pdev)) {
                wil_err(wil, "pci_enable_msi failed, use INTx\n");
-               use_msi = 0;
+               _use_msi = false;
        }
 
-       wil->n_msi = use_msi;
-
-       if ((wil->n_msi == 0) && msi_only) {
+       if (!_use_msi && msi_only) {
                wil_err(wil, "Interrupt pin not routed, unable to use INTx\n");
                rc = -ENODEV;
                goto stop_master;
        }
 
-       rc = wil6210_init_irq(wil, pdev->irq);
+       rc = wil6210_init_irq(wil, pdev->irq, _use_msi);
        if (rc)
                goto stop_master;
 
@@ -293,11 +259,80 @@ static const struct pci_device_id wil6210_pcie_ids[] = {
 };
 MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
 
+#ifdef CONFIG_PM
+
+static int wil6210_suspend(struct device *dev, bool is_runtime)
+{
+       int rc = 0;
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+       wil_dbg_pm(wil, "%s(%s)\n", __func__,
+                  is_runtime ? "runtime" : "system");
+
+       rc = wil_can_suspend(wil, is_runtime);
+       if (rc)
+               goto out;
+
+       rc = wil_suspend(wil, is_runtime);
+       if (rc)
+               goto out;
+
+       /* TODO: how do we bring the card into a low power state? */
+
+       /* disable bus mastering */
+       pci_clear_master(pdev);
+       /* PCI will call pci_save_state(pdev) and pci_prepare_to_sleep(pdev) */
+
+out:
+       return rc;
+}
+
+static int wil6210_resume(struct device *dev, bool is_runtime)
+{
+       int rc = 0;
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+       wil_dbg_pm(wil, "%s(%s)\n", __func__,
+                  is_runtime ? "runtime" : "system");
+
+       /* allow master */
+       pci_set_master(pdev);
+
+       rc = wil_resume(wil, is_runtime);
+       if (rc)
+               pci_clear_master(pdev);
+
+       return rc;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int wil6210_pm_suspend(struct device *dev)
+{
+       return wil6210_suspend(dev, false);
+}
+
+static int wil6210_pm_resume(struct device *dev)
+{
+       return wil6210_resume(dev, false);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops wil6210_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume)
+};
+
 static struct pci_driver wil6210_driver = {
        .probe          = wil_pcie_probe,
        .remove         = wil_pcie_remove,
        .id_table       = wil6210_pcie_ids,
        .name           = WIL_NAME,
+       .driver         = {
+               .pm = &wil6210_pm_ops,
+       },
 };
 
 static int __init wil6210_driver_init(void)
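
The probe-path rework above reduces the old 0/1/3 MSI choice to one bool with graceful degradation: try MSI if requested, fall back to INTx when pci_enable_msi() fails, and only give up when INTx is impossible because the interrupt pin is not routed. The decision logic, lifted into a runnable sketch with the PCI calls stubbed out:

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for pci_enable_msi() success and "pin not routed" */
static bool pci_enable_msi_ok = false;
static bool msi_only = false;

static int pick_irq_mode(bool use_msi, bool *out_msi)
{
	bool _use_msi = use_msi;

	if (use_msi && !pci_enable_msi_ok)
		_use_msi = false; /* pci_enable_msi failed, use INTx */

	if (!_use_msi && msi_only)
		return -19; /* -ENODEV: INTx unusable */

	*out_msi = _use_msi;
	return 0;
}

int main(void)
{
	bool msi;
	int rc = pick_irq_mode(true, &msi);

	printf("rc=%d mode=%s\n", rc, msi ? "MSI" : "INTx");
	return 0;
}
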
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
new file mode 100644 (file)
index 0000000..0b7ecbc
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "wil6210.h"
+
+int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
+{
+       int rc = 0;
+       struct wireless_dev *wdev = wil->wdev;
+
+       wil_dbg_pm(wil, "%s(%s)\n", __func__,
+                  is_runtime ? "runtime" : "system");
+
+       switch (wdev->iftype) {
+       case NL80211_IFTYPE_MONITOR:
+       case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_P2P_CLIENT:
+               break;
+       /* AP-like interface - can't suspend */
+       default:
+               wil_dbg_pm(wil, "AP-like interface\n");
+               rc = -EBUSY;
+               break;
+       }
+
+       wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__,
+                  is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
+
+       return rc;
+}
+
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
+{
+       int rc = 0;
+       struct net_device *ndev = wil_to_ndev(wil);
+
+       wil_dbg_pm(wil, "%s(%s)\n", __func__,
+                  is_runtime ? "runtime" : "system");
+
+       /* if netif up, hardware is alive, shut it down */
+       if (ndev->flags & IFF_UP) {
+               rc = wil_down(wil);
+               if (rc) {
+                       wil_err(wil, "wil_down : %d\n", rc);
+                       goto out;
+               }
+       }
+
+       if (wil->platform_ops.suspend)
+               rc = wil->platform_ops.suspend(wil->platform_handle);
+
+out:
+       wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
+                  is_runtime ? "runtime" : "system", rc);
+       return rc;
+}
+
+int wil_resume(struct wil6210_priv *wil, bool is_runtime)
+{
+       int rc = 0;
+       struct net_device *ndev = wil_to_ndev(wil);
+
+       wil_dbg_pm(wil, "%s(%s)\n", __func__,
+                  is_runtime ? "runtime" : "system");
+
+       if (wil->platform_ops.resume) {
+               rc = wil->platform_ops.resume(wil->platform_handle);
+               if (rc) {
+                       wil_err(wil, "platform_ops.resume : %d\n", rc);
+                       goto out;
+               }
+       }
+
+       /* if netif up, bring hardware up
+        * During open(), IFF_UP is set after the actual device method
+        * invocation. This prevents a recursive call to wil_up()
+        */
+       if (ndev->flags & IFF_UP)
+               rc = wil_up(wil);
+
+out:
+       wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
+                  is_runtime ? "runtime" : "system", rc);
+       return rc;
+}
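
wil_can_suspend above is a pure policy check: suspend is allowed for monitor, station and P2P-client interfaces and refused with -EBUSY for anything AP-like. The same switch as a standalone function, with the NL80211 iftype constants replaced by a local enum for the sketch:

#include <stdio.h>

enum iftype { IF_MONITOR, IF_STATION, IF_P2P_CLIENT, IF_AP, IF_P2P_GO };

static int can_suspend(enum iftype t)
{
	switch (t) {
	case IF_MONITOR:
	case IF_STATION:
	case IF_P2P_CLIENT:
		return 0;
	default: /* AP-like interface, can't suspend */
		return -16; /* -EBUSY */
	}
}

int main(void)
{
	printf("station: %d, AP: %d\n",
	       can_suspend(IF_STATION), can_suspend(IF_AP));
	return 0;
}
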
index ca10dcf0986eaa39faeed4f61d8a90189a224376..9238c1ac23dd0311509b6d769e00ee70ce18ec81 100644 (file)
@@ -121,6 +121,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
                goto out;
        }
 
+       r->total++;
        hseq = r->head_seq_num;
 
        /** Due to the race between WMI events, where BACK establishment
@@ -153,6 +154,9 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
        /* frame with out of date sequence number */
        if (seq_less(seq, r->head_seq_num)) {
                r->ssn_last_drop = seq;
+               r->drop_old++;
+               wil_dbg_txrx(wil, "Rx drop: old seq 0x%03x head 0x%03x\n",
+                            seq, r->head_seq_num);
                dev_kfree_skb(skb);
                goto out;
        }
@@ -173,6 +177,8 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
 
        /* check if we already stored this frame */
        if (r->reorder_buf[index]) {
+               r->drop_dup++;
+               wil_dbg_txrx(wil, "Rx drop: dup seq 0x%03x\n", seq);
                dev_kfree_skb(skb);
                goto out;
        }
index aa20af86e1d61e790a4ba054c9db383ec2968b15..6229110d558a1a043566091859144b582c34c6c9 100644 (file)
@@ -509,7 +509,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
                        break;
                }
        }
-       iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));
+       wil_w(wil, v->hwtail, v->swtail);
 
        return rc;
 }
@@ -541,6 +541,14 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
                [GRO_DROP]              = "GRO_DROP",
        };
 
+       if (ndev->features & NETIF_F_RXHASH)
+               /* fake an L4 hash so it won't be re-calculated later;
+                * set the hash to any non-zero value to activate the RPS
+                * mechanism, and the core will then be chosen according
+                * to the user-level RPS configuration.
+                */
+               skb_set_hash(skb, 1, PKT_HASH_TYPE_L4);
+
        skb_orphan(skb);
 
        if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
@@ -1058,14 +1066,52 @@ static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
 static inline
 void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
 {
-       d->mac.d[2] |= ((nr_frags + 1) <<
-                      MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+       d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
 }
 
-static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
-                                        struct vring_tx_desc *d,
-                                        struct sk_buff *skb)
+/**
+ * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
+ * @skb is used to obtain the protocol and headers length.
+ * @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
+ * 2 - middle, 3 - last descriptor.
+ */
+
+static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
+                                         struct sk_buff *skb,
+                                         int tso_desc_type, bool is_ipv4,
+                                         int tcp_hdr_len, int skb_net_hdr_len)
 {
+       d->dma.b11 = ETH_HLEN; /* MAC header length */
+       d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
+
+       d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
+       /* L4 header len: TCP header length */
+       d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
+
+       /* Setup TSO: bit and desc type */
+       d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
+               (tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
+       d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);
+
+       d->dma.ip_length = skb_net_hdr_len;
+       /* Enable TCP/UDP checksum */
+       d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
+       /* Calculate pseudo-header */
+       d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
+}
+
+/**
+ * Sets the descriptor @d up for csum. The corresponding
+ * @skb is used to obtain the protocol and headers length.
+ * Returns the protocol: 0 - not TCP, 1 - TCPv4, 2 - TCPv6.
+ * Note, if d==NULL, the function only returns the protocol result.
+ *
+ * It is very similar to the previous wil_tx_desc_offload_setup_tso. This
+ * is "if unrolling" to optimize the critical path.
+ */
+
+static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
+                                    struct sk_buff *skb)
+{
        int protocol;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -1110,6 +1156,305 @@ static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
        return 0;
 }
 
+static inline void wil_tx_last_desc(struct vring_tx_desc *d)
+{
+       d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
+             BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
+             BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
+}
+
+static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
+{
+       d->dma.d0 |= wil_tso_type_lst <<
+                 DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
+}
+
+static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
+                             struct sk_buff *skb)
+{
+       struct device *dev = wil_to_dev(wil);
+
+       /* point to descriptors in shared memory */
+       volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
+                                     *_first_desc = NULL;
+
+       /* pointers to shadow descriptors */
+       struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
+                            *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
+                            *first_desc = &first_desc_mem;
+
+       /* pointer to shadow descriptors' context */
+       struct wil_ctx *hdr_ctx, *first_ctx = NULL;
+
+       int descs_used = 0; /* total number of used descriptors */
+       int sg_desc_cnt = 0; /* number of descriptors for current mss */
+
+       u32 swhead = vring->swhead;
+       int used, avail = wil_vring_avail_tx(vring);
+       int nr_frags = skb_shinfo(skb)->nr_frags;
+       int min_desc_required = nr_frags + 1;
+       int mss = skb_shinfo(skb)->gso_size;    /* payload size w/o headers */
+       int f, len, hdrlen, headlen;
+       int vring_index = vring - wil->vring_tx;
+       struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+       uint i = swhead;
+       dma_addr_t pa;
+       const skb_frag_t *frag = NULL;
+       int rem_data = mss;
+       int lenmss;
+       bool hdr_compensation_need = true;
+       int desc_tso_type = wil_tso_type_first;
+       bool is_ipv4;
+       int tcp_hdr_len;
+       int skb_net_hdr_len;
+       int gso_type;
+
+       wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
+                    __func__, skb->len, vring_index);
+
+       if (unlikely(!txdata->enabled))
+               return -EINVAL;
+
+       /* A typical 4K page holds 3-4 payloads; we assume each fragment
+        * is a full payload, which is how min_desc_required has been
+        * calculated. In reality we might need more or fewer descriptors;
+        * this is only the initial check.
+        */
+       if (unlikely(avail < min_desc_required)) {
+               wil_err_ratelimited(wil,
+                                   "TSO: Tx ring[%2d] full. No space for %d fragments\n",
+                                   vring_index, min_desc_required);
+               return -ENOMEM;
+       }
+
+       /* Header Length = MAC header len + IP header len + TCP header len */
+       hdrlen = ETH_HLEN +
+               (int)skb_network_header_len(skb) +
+               tcp_hdrlen(skb);
+
+       gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
+       switch (gso_type) {
+       case SKB_GSO_TCPV4:
+               /* TCP v4, zero out the IP length and IPv4 checksum fields
+                * as required by the offloading doc
+                */
+               ip_hdr(skb)->tot_len = 0;
+               ip_hdr(skb)->check = 0;
+               is_ipv4 = true;
+               break;
+       case SKB_GSO_TCPV6:
+               /* TCP v6, zero out the payload length */
+               ipv6_hdr(skb)->payload_len = 0;
+               is_ipv4 = false;
+               break;
+       default:
+               /* other than TCPv4 or TCPv6 types are not supported for TSO.
+                * It is also illegal for both to be set simultaneously
+                */
+               return -EINVAL;
+       }
+
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return -EINVAL;
+
+       /* tcp header length and skb network header length are fixed for all
+        * of the packet's descriptors - read them once here
+        */
+       tcp_hdr_len = tcp_hdrlen(skb);
+       skb_net_hdr_len = skb_network_header_len(skb);
+
+       _hdr_desc = &vring->va[i].tx;
+
+       pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, pa))) {
+               wil_err(wil, "TSO: Skb head DMA map error\n");
+               goto err_exit;
+       }
+
+       wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index);
+       wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
+                                     tcp_hdr_len, skb_net_hdr_len);
+       wil_tx_last_desc(hdr_desc);
+
+       vring->ctx[i].mapped_as = wil_mapped_as_single;
+       hdr_ctx = &vring->ctx[i];
+
+       descs_used++;
+       headlen = skb_headlen(skb) - hdrlen;
+
+       for (f = headlen ? -1 : 0; f < nr_frags; f++) {
+               if (headlen) {
+                       len = headlen;
+                       wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
+                                    len);
+               } else {
+                       frag = &skb_shinfo(skb)->frags[f];
+                       len = frag->size;
+                       wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
+               }
+
+               while (len) {
+                       wil_dbg_txrx(wil,
+                                    "TSO: len %d, rem_data %d, descs_used %d\n",
+                                    len, rem_data, descs_used);
+
+                       if (descs_used == avail)  {
+                               wil_err(wil, "TSO: ring overflow\n");
+                               goto dma_error;
+                       }
+
+                       lenmss = min_t(int, rem_data, len);
+                       i = (swhead + descs_used) % vring->size;
+                       wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);
+
+                       if (!headlen) {
+                               pa = skb_frag_dma_map(dev, frag,
+                                                     frag->size - len, lenmss,
+                                                     DMA_TO_DEVICE);
+                               vring->ctx[i].mapped_as = wil_mapped_as_page;
+                       } else {
+                               pa = dma_map_single(dev,
+                                                   skb->data +
+                                                   skb_headlen(skb) - headlen,
+                                                   lenmss,
+                                                   DMA_TO_DEVICE);
+                               vring->ctx[i].mapped_as = wil_mapped_as_single;
+                               headlen -= lenmss;
+                       }
+
+                       if (unlikely(dma_mapping_error(dev, pa)))
+                               goto dma_error;
+
+                       _desc = &vring->va[i].tx;
+
+                       if (!_first_desc) {
+                               _first_desc = _desc;
+                               first_ctx = &vring->ctx[i];
+                               d = first_desc;
+                       } else {
+                               d = &desc_mem;
+                       }
+
+                       wil_tx_desc_map(d, pa, lenmss, vring_index);
+                       wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
+                                                     is_ipv4, tcp_hdr_len,
+                                                     skb_net_hdr_len);
+
+                       /* use tso_type_first only once */
+                       desc_tso_type = wil_tso_type_mid;
+
+                       descs_used++;  /* desc used so far */
+                       sg_desc_cnt++; /* desc used for this segment */
+                       len -= lenmss;
+                       rem_data -= lenmss;
+
+                       wil_dbg_txrx(wil,
+                                    "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
+                                    len, rem_data, descs_used, sg_desc_cnt);
+
+                       /* Close the segment if we reached mss size or the last frag */
+                       if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
+                               if (hdr_compensation_need) {
+                                       /* first segment include hdr desc for
+                                        * release
+                                        */
+                                       hdr_ctx->nr_frags = sg_desc_cnt;
+                                       wil_tx_desc_set_nr_frags(first_desc,
+                                                                sg_desc_cnt +
+                                                                1);
+                                       hdr_compensation_need = false;
+                               } else {
+                                       wil_tx_desc_set_nr_frags(first_desc,
+                                                                sg_desc_cnt);
+                               }
+                               first_ctx->nr_frags = sg_desc_cnt - 1;
+
+                               wil_tx_last_desc(d);
+
+                               /* first descriptor may also be the last
+                                * for this mss - make sure not to copy
+                                * it twice
+                                */
+                               if (first_desc != d)
+                                       *_first_desc = *first_desc;
+
+                               /* the last descriptor will be copied at the
+                                * end of this TSO processing
+                                */
+                               if (f < nr_frags - 1 || len > 0)
+                                       *_desc = *d;
+
+                               rem_data = mss;
+                               _first_desc = NULL;
+                               sg_desc_cnt = 0;
+                       } else if (first_desc != d) /* update mid descriptor */
+                                       *_desc = *d;
+               }
+       }
+
+       /* first descriptor may also be the last.
+        * in this case d pointer is invalid
+        */
+       if (_first_desc == _desc)
+               d = first_desc;
+
+       /* Last data descriptor */
+       wil_set_tx_desc_last_tso(d);
+       *_desc = *d;
+
+       /* Fill the total number of descriptors in first desc (hdr)*/
+       wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
+       *_hdr_desc = *hdr_desc;
+
+       /* hold reference to skb
+        * to prevent skb release before accounting
+        * in case of immediate "tx done"
+        */
+       vring->ctx[i].skb = skb_get(skb);
+
+       /* performance monitoring */
+       used = wil_vring_used_tx(vring);
+       if (wil_val_in_range(vring_idle_trsh,
+                            used, used + descs_used)) {
+               txdata->idle += get_cycles() - txdata->last_idle;
+               wil_dbg_txrx(wil,  "Ring[%2d] not idle %d -> %d\n",
+                            vring_index, used, used + descs_used);
+       }
+
+       /* advance swhead */
+       wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
+       wil_vring_advance_head(vring, descs_used);
+
+       /* make sure all writes to descriptors (shared memory) are done before
+        * committing them to HW
+        */
+       wmb();
+
+       wil_w(wil, vring->hwtail, vring->swhead);
+       return 0;
+
+dma_error:
+       wil_err(wil, "TSO: DMA map page error\n");
+       while (descs_used > 0) {
+               struct wil_ctx *ctx;
+
+               i = (swhead + descs_used) % vring->size;
+               d = (struct vring_tx_desc *)&vring->va[i].tx;
+               _desc = &vring->va[i].tx;
+               *d = *_desc;
+               _desc->dma.status = TX_DMA_STATUS_DU;
+               ctx = &vring->ctx[i];
+               wil_txdesc_unmap(dev, d, ctx);
+               if (ctx->skb)
+                       dev_kfree_skb_any(ctx->skb);
+               memset(ctx, 0, sizeof(*ctx));
+               descs_used--;
+       }
+
+err_exit:
+       return -EINVAL;
+}
+
 static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
                          struct sk_buff *skb)
 {
@@ -1128,7 +1473,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        bool mcast = (vring_index == wil->bcast_vring);
        uint len = skb_headlen(skb);
 
-       wil_dbg_txrx(wil, "%s()\n", __func__);
+       wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
+                    __func__, skb->len, vring_index);
 
        if (unlikely(!txdata->enabled))
                return -EINVAL;
@@ -1159,14 +1505,14 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
                        d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
        }
        /* Process TCP/UDP checksum offloading */
-       if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {
+       if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
                wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
                        vring_index);
                goto dma_error;
        }
 
        vring->ctx[i].nr_frags = nr_frags;
-       wil_tx_desc_set_nr_frags(d, nr_frags);
+       wil_tx_desc_set_nr_frags(d, nr_frags + 1);
 
        /* middle segments */
        for (; f < nr_frags; f++) {
@@ -1190,7 +1536,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
                 * if it succeeded for 1-st descriptor,
                 * it will succeed here too
                 */
-               wil_tx_desc_offload_cksum_set(wil, d, skb);
+               wil_tx_desc_offload_setup(d, skb);
        }
        /* for the last seg only */
        d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
@@ -1221,7 +1567,13 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
                     vring->swhead);
        trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
-       iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
+
+       /* make sure all writes to descriptors (shared memory) are done before
+        * committing them to HW
+        */
+       wmb();
+
+       wil_w(wil, vring->hwtail, vring->swhead);
 
        return 0;
  dma_error:
@@ -1254,8 +1606,12 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        int rc;
 
        spin_lock(&txdata->lock);
-       rc = __wil_tx_vring(wil, vring, skb);
+
+       rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
+            (wil, vring, skb);
+
        spin_unlock(&txdata->lock);
+
        return rc;
 }
 
@@ -1382,7 +1738,8 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
                struct wil_ctx *ctx = &vring->ctx[vring->swtail];
                /**
                 * For the fragmented skb, HW will set DU bit only for the
-                * last fragment. look for it
+                * last fragment; look for it.
+                * In TSO the first DU will include the hdr desc
                 */
                int lf = (vring->swtail + ctx->nr_frags) % vring->size;
                /* TODO: check we are not past head */
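
For the TSO path added above, the descriptor accounting is the subtle part: the headers get a dedicated descriptor, then the linear payload and every page fragment are sliced into lenmss = min(rem_data, len) pieces, and a segment closes whenever an mss worth of payload has been consumed. The standalone model below replays that loop and shows why the initial nr_frags + 1 availability check is only an estimate that the in-loop ring-overflow check must back up:

#include <stdio.h>

int main(void)
{
	int mss = 1460;
	int headlen = 800;            /* linear payload after the headers */
	int frags[] = { 4096, 4096 }; /* page fragment sizes */
	int nfrags = 2;

	int rem_data = mss, descs = 1; /* 1 = the header descriptor */

	for (int f = headlen ? -1 : 0; f < nfrags; f++) {
		int len = (f < 0) ? headlen : frags[f];

		while (len) {
			int lenmss = rem_data < len ? rem_data : len;

			descs++;
			len -= lenmss;
			rem_data -= lenmss;
			if (rem_data == 0)
				rem_data = mss; /* segment closed */
		}
	}
	printf("descriptors used: %d (initial estimate was %d)\n",
	       descs, nfrags + 1);
	return 0;
}
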
index 0c4638487c742c3c5cbace887a95562a8e618ea2..82a8f9a030e7e9179db31f6be1cec985a801e8a3 100644 (file)
@@ -291,6 +291,14 @@ struct vring_tx_dma {
        __le16 length;
 } __packed;
 
+/* TSO type used in dma descriptor d0 bits 11-12 */
+enum {
+       wil_tso_type_hdr = 0,
+       wil_tso_type_first = 1,
+       wil_tso_type_mid = 2,
+       wil_tso_type_lst = 3,
+};
+
 /* Rx descriptor - MAC part
  * [dword 0]
  * bit  0.. 3 : tid:4 The QoS (b3-0) TID Field
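
The four enum values above map to a 2-bit field in the Tx DMA descriptor
(d0 bits 11-12, per the comment). A sketch of how a driver might encode
them; the POS/MSK macro names here are assumptions, not taken from this
header:

        #define TX_D0_TSO_TYPE_POS      11
        #define TX_D0_TSO_TYPE_MSK      (0x3 << TX_D0_TSO_TYPE_POS)

        static inline void tx_desc_set_tso_type(struct vring_tx_desc *d,
                                                u32 type)
        {
                d->dma.d0 &= ~TX_D0_TSO_TYPE_MSK;        /* clear bits 11-12 */
                d->dma.d0 |= type << TX_D0_TSO_TYPE_POS; /* hdr/first/mid/lst */
        }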
index 275355d46a36fc8c2ae85585fc7188c0a7e64473..dd4ea926b8e31eb8721a8c8e2f2a209fd7f0017b 100644 (file)
@@ -127,16 +127,6 @@ struct RGF_ICR {
        u32 IMC; /* Mask Clear, write 1 to clear */
 } __packed;
 
-struct RGF_BL {
-       u32 ready;              /* 0x880A3C bit [0] */
-#define BIT_BL_READY   BIT(0)
-       u32 version;            /* 0x880A40 version of the BL struct */
-       u32 rf_type;            /* 0x880A44 ID of the connected RF */
-       u32 baseband_type;      /* 0x880A48 ID of the baseband */
-       u8  mac_address[ETH_ALEN]; /* 0x880A4C permanent MAC */
-       u8 pad[2];
-} __packed;
-
 /* registers - FW addresses */
 #define RGF_USER_USAGE_1               (0x880004)
 #define RGF_USER_USAGE_6               (0x880018)
@@ -262,9 +252,8 @@ enum {
 };
 
 /* popular locations */
-#define HOST_MBOX   HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
-#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \
-       offsetof(struct RGF_ICR, ICS))
+#define RGF_MBOX   RGF_USER_USER_SCRATCH_PAD
+#define HOST_MBOX   HOSTADDR(RGF_MBOX)
 #define SW_INT_MBOX BIT_USER_USER_ICR_SW_INT_2
 
 /* ISR register bits */
@@ -434,12 +423,12 @@ struct pci_dev;
  * @ssn: Starting Sequence Number expected to be aggregated.
  * @buf_size: buffer size for incoming A-MPDUs
  * @timeout: reset timer value (in TUs).
+ * @ssn_last_drop: SSN of the last dropped frame
+ * @total: total number of processed incoming frames
+ * @drop_dup: duplicate frames dropped for this reorder buffer
+ * @drop_old: old frames dropped for this reorder buffer
  * @dialog_token: dialog token for aggregation session
- * @rcu_head: RCU head used for freeing this struct
- *
- * This structure's lifetime is managed by RCU, assignments to
- * the array holding it must hold the aggregation mutex.
- *
+ * @first_time: true the first time this buffer is used
  */
 struct wil_tid_ampdu_rx {
        struct sk_buff **reorder_buf;
@@ -453,6 +442,9 @@ struct wil_tid_ampdu_rx {
        u16 buf_size;
        u16 timeout;
        u16 ssn_last_drop;
+       unsigned long long total; /* frames processed */
+       unsigned long long drop_dup;
+       unsigned long long drop_old;
        u8 dialog_token;
        bool first_time; /* is it 1-st time this buffer used? */
 };
@@ -543,7 +535,6 @@ struct pmc_ctx {
 
 struct wil6210_priv {
        struct pci_dev *pdev;
-       int n_msi;
        struct wireless_dev *wdev;
        void __iomem *csr;
        DECLARE_BITMAP(status, wil_status_last);
@@ -559,6 +550,8 @@ struct wil6210_priv {
        /* profile */
        u32 monitor_flags;
        u32 privacy; /* secure connection? */
+       u8 hidden_ssid; /* relevant in AP mode */
+       u16 channel; /* relevant in AP mode */
        int sinfo_gen;
        u32 ap_isolate; /* no intra-BSS communication */
        /* interrupt moderation */
@@ -654,6 +647,33 @@ void wil_info(struct wil6210_priv *wil, const char *fmt, ...);
 #define wil_dbg_txrx(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
 #define wil_dbg_wmi(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
 #define wil_dbg_misc(wil, fmt, arg...) wil_dbg(wil, "DBG[MISC]" fmt, ##arg)
+#define wil_dbg_pm(wil, fmt, arg...) wil_dbg(wil, "DBG[ PM ]" fmt, ##arg)
+
+/* target operations */
+/* register read */
+static inline u32 wil_r(struct wil6210_priv *wil, u32 reg)
+{
+       return readl(wil->csr + HOSTADDR(reg));
+}
+
+/* register write. wmb() to keep it ordered before later HW accesses */
+static inline void wil_w(struct wil6210_priv *wil, u32 reg, u32 val)
+{
+       writel(val, wil->csr + HOSTADDR(reg));
+       wmb(); /* order this write before subsequent HW accesses */
+}
+
+/* register set = read, OR, write */
+static inline void wil_s(struct wil6210_priv *wil, u32 reg, u32 val)
+{
+       wil_w(wil, reg, wil_r(wil, reg) | val);
+}
+
+/* register clear = read, AND with inverted, write */
+static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val)
+{
+       wil_w(wil, reg, wil_r(wil, reg) & ~val);
+}
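+
+/* usage sketch for the accessors above: the register (RGF_USER_USAGE_1)
+ * is real, the bit manipulated is illustrative only
+ *
+ *     u32 v = wil_r(wil, RGF_USER_USAGE_1);   read: readl + HOSTADDR
+ *     wil_w(wil, RGF_USER_USAGE_1, v);        write: writel + wmb
+ *     wil_s(wil, RGF_USER_USAGE_1, BIT(0));   set bit 0 (read-OR-write)
+ *     wil_c(wil, RGF_USER_USAGE_1, BIT(0));   clear bit 0
+ */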
 
 #if defined(CONFIG_DYNAMIC_DEBUG)
 #define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize,    \
@@ -744,7 +764,7 @@ void wil_back_tx_worker(struct work_struct *work);
 void wil_back_tx_flush(struct wil6210_priv *wil);
 
 void wil6210_clear_irq(struct wil6210_priv *wil);
-int wil6210_init_irq(struct wil6210_priv *wil, int irq);
+int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi);
 void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
 void wil_mask_irq(struct wil6210_priv *wil);
 void wil_unmask_irq(struct wil6210_priv *wil);
@@ -796,4 +816,8 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type);
 int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
 int wil_request_firmware(struct wil6210_priv *wil, const char *name);
 
+int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
+int wil_resume(struct wil6210_priv *wil, bool is_runtime);
+
 #endif /* __WIL6210_H__ */
index de15f1422fe9faefe225dd618aecbe688c7a443d..2e831bf20117f5316ff6b1dd6fa22dddc8d95e04 100644 (file)
@@ -14,7 +14,7 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#include "linux/device.h"
+#include <linux/device.h>
 #include "wil_platform.h"
 
 int __init wil_platform_modinit(void)
index c759759afbb2dfe63f8d2f7dd20f718d16d2591b..7a257360c4201a6c9dab3e21ab43a09f724ce8d3 100644 (file)
@@ -228,8 +228,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
        wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
        /* wait till FW finish with previous command */
        for (retry = 5; retry > 0; retry--) {
-               r->tail = ioread32(wil->csr + HOST_MBOX +
-                                  offsetof(struct wil6210_mbox_ctl, tx.tail));
+               r->tail = wil_r(wil, RGF_MBOX +
+                               offsetof(struct wil6210_mbox_ctl, tx.tail));
                if (next_head != r->tail)
                        break;
                msleep(20);
@@ -254,16 +254,16 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
        wil_memcpy_toio_32(dst, &cmd, sizeof(cmd));
        wil_memcpy_toio_32(dst + sizeof(cmd), buf, len);
        /* mark entry as full */
-       iowrite32(1, wil->csr + HOSTADDR(r->head) +
-                 offsetof(struct wil6210_mbox_ring_desc, sync));
+       wil_w(wil, r->head + offsetof(struct wil6210_mbox_ring_desc, sync), 1);
        /* advance next ptr */
-       iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
-                 offsetof(struct wil6210_mbox_ctl, tx.head));
+       wil_w(wil, RGF_MBOX + offsetof(struct wil6210_mbox_ctl, tx.head),
+             r->head = next_head);
 
        trace_wil6210_wmi_cmd(&cmd.wmi, buf, len);
 
        /* interrupt to FW */
-       iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
+       wil_w(wil, RGF_USER_USER_ICR + offsetof(struct RGF_ICR, ICS),
+             SW_INT_MBOX);
 
        return 0;
 }
@@ -312,22 +312,44 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
        struct wiphy *wiphy = wil_to_wiphy(wil);
        struct ieee80211_mgmt *rx_mgmt_frame =
                        (struct ieee80211_mgmt *)data->payload;
-       int ch_no = data->info.channel+1;
-       u32 freq = ieee80211_channel_to_frequency(ch_no,
-                       IEEE80211_BAND_60GHZ);
-       struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq);
-       s32 signal = data->info.sqi;
-       __le16 fc = rx_mgmt_frame->frame_control;
-       u32 d_len = le32_to_cpu(data->info.len);
-       u16 d_status = le16_to_cpu(data->info.status);
-
-       wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d SQI %d%%\n",
+       int flen = len - offsetof(struct wmi_rx_mgmt_packet_event, payload);
+       int ch_no;
+       u32 freq;
+       struct ieee80211_channel *channel;
+       s32 signal;
+       __le16 fc;
+       u32 d_len;
+       u16 d_status;
+
+       if (flen < 0) {
+               wil_err(wil, "MGMT Rx: short event, len %d\n", len);
+               return;
+       }
+
+       d_len = le32_to_cpu(data->info.len);
+       if (d_len != flen) {
+               wil_err(wil,
+                       "MGMT Rx: length mismatch, d_len %d should be %d\n",
+                       d_len, flen);
+               return;
+       }
+
+       ch_no = data->info.channel + 1;
+       freq = ieee80211_channel_to_frequency(ch_no, IEEE80211_BAND_60GHZ);
+       channel = ieee80211_get_channel(wiphy, freq);
+       signal = data->info.sqi;
+       d_status = le16_to_cpu(data->info.status);
+       fc = rx_mgmt_frame->frame_control;
+
+       wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d SNR %d SQI %d%%\n",
                    data->info.channel, data->info.mcs, data->info.snr,
                    data->info.sqi);
        wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
                    le16_to_cpu(fc));
        wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
                    data->info.qid, data->info.mid, data->info.cid);
+       wil_hex_dump_wmi("MGMT Rx ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame,
+                        d_len, true);
 
        if (!channel) {
                wil_err(wil, "Frame on unsupported channel\n");
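
The validation added in this hunk cross-checks the transport length against
the FW-reported one before the payload is touched. Distilled, the guard is:

        int flen = len - offsetof(struct wmi_rx_mgmt_packet_event, payload);

        if (flen < 0)
                return;         /* event shorter than its fixed header */
        if (le32_to_cpu(data->info.len) != flen)
                return;         /* FW length disagrees with transport length */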
@@ -363,6 +385,17 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
        }
 }
 
+static void wmi_evt_tx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
+{
+       struct wmi_tx_mgmt_packet_event *data = d;
+       struct ieee80211_mgmt *mgmt_frame =
+                       (struct ieee80211_mgmt *)data->payload;
+       int flen = len - offsetof(struct wmi_tx_mgmt_packet_event, payload);
+
+       wil_hex_dump_wmi("MGMT Tx ", DUMP_PREFIX_OFFSET, 16, 1, mgmt_frame,
+                        flen, true);
+}
+
 static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
                                  void *d, int len)
 {
@@ -659,6 +692,7 @@ static const struct {
        {WMI_READY_EVENTID,             wmi_evt_ready},
        {WMI_FW_READY_EVENTID,          wmi_evt_fw_ready},
        {WMI_RX_MGMT_PACKET_EVENTID,    wmi_evt_rx_mgmt},
+       {WMI_TX_MGMT_PACKET_EVENTID,    wmi_evt_tx_mgmt},
        {WMI_SCAN_COMPLETE_EVENTID,     wmi_evt_scan_complete},
        {WMI_CONNECT_EVENTID,           wmi_evt_connect},
        {WMI_DISCONNECT_EVENTID,        wmi_evt_disconnect},
@@ -695,8 +729,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
                u16 len;
                bool q;
 
-               r->head = ioread32(wil->csr + HOST_MBOX +
-                                  offsetof(struct wil6210_mbox_ctl, rx.head));
+               r->head = wil_r(wil, RGF_MBOX +
+                               offsetof(struct wil6210_mbox_ctl, rx.head));
                if (r->tail == r->head)
                        break;
 
@@ -734,8 +768,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
                cmd = (void *)&evt->event.wmi;
                wil_memcpy_fromio_32(cmd, src, len);
                /* mark entry as empty */
-               iowrite32(0, wil->csr + HOSTADDR(r->tail) +
-                         offsetof(struct wil6210_mbox_ring_desc, sync));
+               wil_w(wil, r->tail +
+                     offsetof(struct wil6210_mbox_ring_desc, sync), 0);
                /* indicate */
                if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
                    (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
@@ -754,8 +788,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
                /* advance tail */
                r->tail = r->base + ((r->tail - r->base +
                          sizeof(struct wil6210_mbox_ring_desc)) % r->size);
-               iowrite32(r->tail, wil->csr + HOST_MBOX +
-                         offsetof(struct wil6210_mbox_ctl, rx.tail));
+               wil_w(wil, RGF_MBOX +
+                     offsetof(struct wil6210_mbox_ctl, rx.tail), r->tail);
 
                /* add to the pending list */
                spin_lock_irqsave(&wil->wmi_ev_lock, flags);
@@ -988,12 +1022,21 @@ int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
 
 int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
 {
+       static const char *const names[] = {
+               [WMI_FRAME_BEACON]      = "BEACON",
+               [WMI_FRAME_PROBE_REQ]   = "PROBE_REQ",
+               [WMI_FRAME_PROBE_RESP]  = "PROBE_RESP",
+               [WMI_FRAME_ASSOC_REQ]   = "ASSOC_REQ",
+               [WMI_FRAME_ASSOC_RESP]  = "ASSOC_RESP",
+       };
        int rc;
        u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
        struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
 
-       if (!cmd)
-               return -ENOMEM;
+       if (!cmd) {
+               rc = -ENOMEM;
+               goto out;
+       }
        if (!ie)
                ie_len = 0;
 
@@ -1003,6 +1046,12 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
        memcpy(cmd->ie_info, ie, ie_len);
        rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len);
        kfree(cmd);
+out:
+       if (rc) {
+               const char *name = type < ARRAY_SIZE(names) ?
+                                  names[type] : "??";
+               wil_err(wil, "set_ie(%d %s) failed: %d\n", type, name, rc);
+       }
 
        return rc;
 }
@@ -1129,15 +1178,42 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
 
 int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
 {
+       int rc;
+       u16 reason_code;
        struct wmi_disconnect_sta_cmd cmd = {
                .disconnect_reason = cpu_to_le16(reason),
        };
+       struct {
+               struct wil6210_mbox_hdr_wmi wmi;
+               struct wmi_disconnect_event evt;
+       } __packed reply;
 
        ether_addr_copy(cmd.dst_mac, mac);
 
        wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);
 
-       return wmi_send(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd));
+       rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd),
+                     WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), 1000);
+       /* failure to disconnect in a reasonable time is treated as FW error */
+       if (rc) {
+               wil_fw_error_recovery(wil);
+               return rc;
+       }
+
+       /* call the event handler manually after processing wmi_call,
+        * to avoid a deadlock: the disconnect event handler acquires
+        * wil->mutex, which is already held here
+        */
+       reason_code = le16_to_cpu(reply.evt.protocol_reason_status);
+
+       wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n",
+                   reply.evt.bssid, reason_code,
+                   reply.evt.disconnect_reason);
+
+       wil->sinfo_gen++;
+       wil6210_disconnect(wil, reply.evt.bssid, reason_code, true);
+
+       return 0;
 }
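
wmi_disconnect_sta() now blocks on the WMI_DISCONNECT_EVENTID reply and runs
the disconnect logic inline rather than via the generic event path. A
compressed sketch of the hazard being avoided (assuming, as the comment
states, that the generic handler takes wil->mutex):

        mutex_lock(&wil->mutex);        /* held by the caller */
        rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd),
                      WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), 1000);
        /* dispatching the reply through wmi_evt_disconnect() would do
         *      mutex_lock(&wil->mutex);        -> self-deadlock
         * hence the reply is parsed here and wil6210_disconnect()
         * is called directly
         */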
 
 int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout)
@@ -1279,7 +1355,7 @@ static void wmi_event_handle(struct wil6210_priv *wil,
                /* search for handler */
                if (!wmi_evt_call_handler(wil, id, evt_data,
                                          len - sizeof(*wmi))) {
-                       wil_err(wil, "Unhandled event 0x%04x\n", id);
+                       wil_info(wil, "Unhandled event 0x%04x\n", id);
                }
        } else {
                wil_err(wil, "Unknown event type\n");
index 916123a3d74e71053a908491f64eec4a0d978a76..a335f94c72ff7019c3017afadc7946ec26283cc3 100644 (file)
@@ -929,8 +929,8 @@ void b43_lo_g_adjust_to(struct b43_wldev *dev,
        b43_lo_write(dev, &cal->ctl);
 }
 
-/* Periodic LO maintanance work */
-void b43_lo_g_maintanance_work(struct b43_wldev *dev)
+/* Periodic LO maintenance work */
+void b43_lo_g_maintenance_work(struct b43_wldev *dev)
 {
        struct b43_phy *phy = &dev->phy;
        struct b43_phy_g *gphy = phy->g;
index 3b27e20eff80ff1d295d50e879888ed938eda913..7b4df3883bc24ed4f98ef7e8ca755c2304d3a1ad 100644 (file)
@@ -80,7 +80,7 @@ void b43_lo_g_adjust_to(struct b43_wldev *dev,
 
 void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all);
 
-void b43_lo_g_maintanance_work(struct b43_wldev *dev);
+void b43_lo_g_maintenance_work(struct b43_wldev *dev);
 void b43_lo_g_cleanup(struct b43_wldev *dev);
 void b43_lo_g_init(struct b43_wldev *dev);
 
index 727ce6edb4b3831faf6d254ad789b4a9a1a7c336..462310e6e88fbe85716fef77410f2302f3de64b9 100644 (file)
@@ -3004,7 +3004,7 @@ static void b43_gphy_op_pwork_15sec(struct b43_wldev *dev)
                   phy->rev == 1) {
                //TODO: implement rev1 workaround
        }
-       b43_lo_g_maintanance_work(dev);
+       b43_lo_g_maintenance_work(dev);
        b43_mac_enable(dev);
 }
 
index d86d1f1f1c91d70ea803cfd07737ecaab6699400..ffe526070d6f19a99de1a777129a51692e508147 100644 (file)
@@ -5785,6 +5785,7 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy)
 
 static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
 {
+       struct brcmf_pub *drvr = ifp->drvr;
        struct ieee80211_supported_band *band;
        __le32 bandlist[3];
        u32 n_bands;
@@ -5798,6 +5799,19 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
        if (err)
                return err;
 
+       for (i = 0; i < wiphy->iface_combinations->max_interfaces &&
+            i < ARRAY_SIZE(drvr->addresses); i++) {
+               u8 *addr = drvr->addresses[i].addr;
+
+               memcpy(addr, drvr->mac, ETH_ALEN);
+               if (i) {
+                       addr[0] |= BIT(1);
+                       addr[ETH_ALEN - 1] ^= i;
+               }
+       }
+       wiphy->addresses = drvr->addresses;
+       wiphy->n_addresses = i;
+
        wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
        wiphy->cipher_suites = __wl_cipher_suites;
        wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
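
brcmf_setup_wiphy() now registers one MAC address per supported interface:
the factory address for the primary and derived ones for the rest.
Self-contained sketch of the derivation (the base address handling mirrors
the hunk; helper name and framing are invented for illustration):

        #include <string.h>

        #define ETH_ALEN 6
        #define BIT(n) (1u << (n))

        static void derive_addr(const unsigned char *base,
                                unsigned char *addr, unsigned int i)
        {
                memcpy(addr, base, ETH_ALEN);
                if (i) {
                        addr[0] |= BIT(1);       /* locally administered */
                        addr[ETH_ALEN - 1] ^= i; /* unique per interface */
                }
        }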
index fd74a9c6e9ac62ac310c688f5fade1acbf7c7d73..746304121cdbf99bf02845462689737c21272c02 100644 (file)
@@ -21,6 +21,7 @@
 #ifndef BRCMFMAC_CORE_H
 #define BRCMFMAC_CORE_H
 
+#include <net/cfg80211.h>
 #include "fweh.h"
 
 #define TOE_TX_CSUM_OL         0x00000001
@@ -118,6 +119,8 @@ struct brcmf_pub {
        /* Multicast data packets sent to dongle */
        unsigned long tx_multicast;
 
+       struct mac_address addresses[BRCMF_MAX_IFS];
+
        struct brcmf_if *iflist[BRCMF_MAX_IFS];
 
        struct mutex proto_block;
index d36f5f3d931b55f1df10da3b30d87fdd9b931611..f990e3d0e696e7f76b4ef501853eb6c5b7ff2831 100644 (file)
@@ -2564,15 +2564,6 @@ static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
        }
 }
 
-static void atomic_orr(int val, atomic_t *v)
-{
-       int old_val;
-
-       old_val = atomic_read(v);
-       while (atomic_cmpxchg(v, old_val, val | old_val) != old_val)
-               old_val = atomic_read(v);
-}
-
 static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
 {
        struct brcmf_core *buscore;
@@ -2595,7 +2586,7 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
        if (val) {
                brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
                bus->sdcnt.f1regdata++;
-               atomic_orr(val, &bus->intstatus);
+               atomic_or(val, &bus->intstatus);
        }
 
        return ret;
@@ -2712,7 +2703,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
 
        /* Keep still-pending events for next scheduling */
        if (intstatus)
-               atomic_orr(intstatus, &bus->intstatus);
+               atomic_or(intstatus, &bus->intstatus);
 
        brcmf_sdio_clrintr(bus);
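
Both call sites above switch to the kernel's generic atomic_or(), making the
driver-local atomic_orr() redundant. The deleted helper open-coded the usual
compare-and-swap fallback, which the generic implementation provides per
architecture (often as a single native instruction):

        /* the pattern the deleted helper implemented */
        static void atomic_or_fallback(int val, atomic_t *v)
        {
                int old;

                do {
                        old = atomic_read(v);
                } while (atomic_cmpxchg(v, old, old | val) != old);
        }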
 
index ab775a5d5b331f975c6b4dadb300a0744556d9b1..d2c5747e3ac9233731d70d8ff391fc8791beaebb 100644 (file)
@@ -1472,9 +1472,7 @@ struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
        wl->timers = t;
 
 #ifdef DEBUG
-       t->name = kmalloc(strlen(name) + 1, GFP_ATOMIC);
-       if (t->name)
-               strcpy(t->name, name);
+       t->name = kstrdup(name, GFP_ATOMIC);
 #endif
 
        return t;
index 7603546d2de322cf8fb5dd5bf0d5ff3e48d8a6be..29185aeccba8b721d3d9c18177f5f86b10d145a2 100644 (file)
@@ -467,7 +467,6 @@ static struct spi_driver spi_driver = {
        .remove         = cw1200_spi_disconnect,
        .driver = {
                .name           = "cw1200_wlan_spi",
-               .bus            = &spi_bus_type,
                .owner          = THIS_MODULE,
 #ifdef CONFIG_PM
                .pm             = &cw1200_pm_ops,
index 08eb229e7816010f11e702d679cb178b213362d2..36818c7f30b962d549867c354521cc72cbb4d26d 100644 (file)
@@ -1410,7 +1410,7 @@ static int ipw2100_power_cycle_adapter(struct ipw2100_priv *priv)
 static int ipw2100_hw_phy_off(struct ipw2100_priv *priv)
 {
 
-#define HW_PHY_OFF_LOOP_DELAY (HZ / 5000)
+#define HW_PHY_OFF_LOOP_DELAY (msecs_to_jiffies(50))
 
        struct host_command cmd = {
                .host_command = CARD_DISABLE_PHY_OFF,
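
The old delay truncated to zero: with integer division and any HZ below
5000 (HZ is typically 100-1000), HZ / 5000 == 0 jiffies, so the loop never
actually slept. The replacement is HZ-independent:

        HZ = 1000:  HZ / 5000             ->  0 jiffies (no delay at all)
                    msecs_to_jiffies(50)  -> 50 jiffies (a real 50 ms)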
index 7f4cb692cc57e97de9b6bd78ea717df97d03221f..af1b3e6839fa6db3e69e90bee7a50923f1d0230e 100644 (file)
@@ -3259,7 +3259,7 @@ il3945_show_measurement(struct device *d, struct device_attribute *attr,
 
        while (size && PAGE_SIZE - len) {
                hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
-                                  PAGE_SIZE - len, 1);
+                                  PAGE_SIZE - len, true);
                len = strlen(buf);
                if (PAGE_SIZE - len)
                        buf[len++] = '\n';
index 34401015319631bb21afda0f48f283f13f6a2343..908b9f4fef6f3b4e3522b8f2c5a03166ca0cf2b8 100644 (file)
@@ -515,12 +515,8 @@ il_dbgfs_nvm_read(struct file *file, char __user *user_buf, size_t count,
            scnprintf(buf + pos, buf_size - pos, "EEPROM " "version: 0x%x\n",
                      eeprom_ver);
        for (ofs = 0; ofs < eeprom_len; ofs += 16) {
-               pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
-               hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos,
-                                  buf_size - pos, 0);
-               pos += strlen(buf + pos);
-               if (buf_size - pos > 0)
-                       buf[pos++] = '\n';
+               pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n",
+                                ofs, ptr + ofs);
        }
 
        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
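
The rewrite leans on the kernel's %*ph printk extension, which formats a
small buffer (up to 64 bytes) as space-separated hex and replaces the
scnprintf/hex_dump_to_buffer/strlen dance. Sketch of the idiom:

        u8 buf[16] = { 0xde, 0xad };    /* remaining bytes are zero */

        pr_info("0x%.4x %16ph\n", 0, buf);
        /* -> "0x0000 de ad 00 00 00 00 00 00 00 00 00 00 00 00 00 00" */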
index c160dad03037bd01399a8747096a3653ee67cd3a..101ef310929220a16ae3222246eee3de593185cb 100644 (file)
@@ -122,9 +122,8 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
 void iwl_down(struct iwl_priv *priv);
 void iwl_cancel_deferred_work(struct iwl_priv *priv);
 void iwlagn_prepare_restart(struct iwl_priv *priv);
-int __must_check iwl_rx_dispatch(struct iwl_op_mode *op_mode,
-                                struct iwl_rx_cmd_buffer *rxb,
-                                struct iwl_device_cmd *cmd);
+void iwl_rx_dispatch(struct iwl_op_mode *op_mode,
+                    struct iwl_rx_cmd_buffer *rxb);
 
 bool iwl_check_for_ct_kill(struct iwl_priv *priv);
 
@@ -216,11 +215,9 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta, u16 tid);
 int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif,
                        struct ieee80211_sta *sta, u16 tid);
-int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
-                                  struct iwl_rx_cmd_buffer *rxb,
-                                  struct iwl_device_cmd *cmd);
-int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd);
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+                                  struct iwl_rx_cmd_buffer *rxb);
+void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
 
 static inline u32 iwl_tx_status_to_mac80211(u32 status)
 {
@@ -277,9 +274,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
 
 /* bt coex */
 void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
-int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
-                                 struct iwl_rx_cmd_buffer *rxb,
-                                 struct iwl_device_cmd *cmd);
 void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv);
 void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv);
 void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv);
@@ -332,8 +326,7 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 
 int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
                    struct iwl_link_quality_cmd *lq, u8 flags, bool init);
-int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd);
+void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
 int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
                      struct ieee80211_sta *sta);
 
index 074977ede34391380c8c2383435847ae5dddb8a7..0ba3e56d6015b16feb34acc71c36a81d42453eb6 100644 (file)
@@ -680,9 +680,8 @@ struct iwl_priv {
        enum ieee80211_band band;
        u8 valid_contexts;
 
-       int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
-                                      struct iwl_rx_cmd_buffer *rxb,
-                                      struct iwl_device_cmd *cmd);
+       void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
+                                      struct iwl_rx_cmd_buffer *rxb);
 
        struct iwl_notif_wait_data notif_wait;
 
index 1d2223df5cb01fc84a06a55e431825175864ba34..ab45819c1fbbf6d0080813c26090bb095082672d 100644 (file)
@@ -659,9 +659,8 @@ static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv,
        return need_update;
 }
 
-int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
-                                 struct iwl_rx_cmd_buffer *rxb,
-                                 struct iwl_device_cmd *cmd)
+static void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
+                                        struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_bt_coex_profile_notif *coex = (void *)pkt->data;
@@ -669,7 +668,7 @@ int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
 
        if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
                /* bt coex disabled */
-               return 0;
+               return;
        }
 
        IWL_DEBUG_COEX(priv, "BT Coex notification:\n");
@@ -714,7 +713,6 @@ int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
        /* FIXME: based on notification, adjust the prio_boost */
 
        priv->bt_ci_compliance = coex->bt_ci_compliance;
-       return 0;
 }
 
 void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
index 7acaa266b704699754a30bda7f5cb1547767e8ae..453f7c315ab525dcda15696676b16ef3b02f46f1 100644 (file)
@@ -250,12 +250,24 @@ static int __iwl_up(struct iwl_priv *priv)
                }
        }
 
+       ret = iwl_trans_start_hw(priv->trans);
+       if (ret) {
+               IWL_ERR(priv, "Failed to start HW: %d\n", ret);
+               goto error;
+       }
+
        ret = iwl_run_init_ucode(priv);
        if (ret) {
                IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
                goto error;
        }
 
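+       /* the init-ucode run brings the device down again when it
+        * completes (rationale inferred, not stated in this diff),
+        * so the HW must be restarted before loading the runtime ucode
+        */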
+       ret = iwl_trans_start_hw(priv->trans);
+       if (ret) {
+               IWL_ERR(priv, "Failed to start HW: %d\n", ret);
+               goto error;
+       }
+
        ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
        if (ret) {
                IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
@@ -432,7 +444,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
                u32 error_id;
        } err_info;
        struct iwl_notification_wait status_wait;
-       static const u8 status_cmd[] = {
+       static const u16 status_cmd[] = {
                REPLY_WOWLAN_GET_STATUS,
        };
        struct iwlagn_wowlan_status status_data = {};
index 3bd7c86e90d9fca5c43c6a95ac7795ec74ddeaf7..cef921c1a62325a9f9d8cd03273ec95e9eab2204 100644 (file)
@@ -1416,11 +1416,11 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
 /*
  * Try to switch to new modulation mode from legacy
  */
-static int rs_move_legacy_other(struct iwl_priv *priv,
-                               struct iwl_lq_sta *lq_sta,
-                               struct ieee80211_conf *conf,
-                               struct ieee80211_sta *sta,
-                               int index)
+static void rs_move_legacy_other(struct iwl_priv *priv,
+                                struct iwl_lq_sta *lq_sta,
+                                struct ieee80211_conf *conf,
+                                struct ieee80211_sta *sta,
+                                int index)
 {
        struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
        struct iwl_scale_tbl_info *search_tbl =
@@ -1575,7 +1575,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
 
        }
        search_tbl->lq_type = LQ_NONE;
-       return 0;
+       return;
 
 out:
        lq_sta->search_better_tbl = 1;
@@ -1584,17 +1584,15 @@ out:
                tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
        if (update_search_tbl_counter)
                search_tbl->action = tbl->action;
-       return 0;
-
 }
 
 /*
  * Try to switch to new modulation mode from SISO
  */
-static int rs_move_siso_to_other(struct iwl_priv *priv,
-                                struct iwl_lq_sta *lq_sta,
-                                struct ieee80211_conf *conf,
-                                struct ieee80211_sta *sta, int index)
+static void rs_move_siso_to_other(struct iwl_priv *priv,
+                                 struct iwl_lq_sta *lq_sta,
+                                 struct ieee80211_conf *conf,
+                                 struct ieee80211_sta *sta, int index)
 {
        u8 is_green = lq_sta->is_green;
        struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1747,7 +1745,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
                        break;
        }
        search_tbl->lq_type = LQ_NONE;
-       return 0;
+       return;
 
  out:
        lq_sta->search_better_tbl = 1;
@@ -1756,17 +1754,15 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
                tbl->action = IWL_SISO_SWITCH_ANTENNA1;
        if (update_search_tbl_counter)
                search_tbl->action = tbl->action;
-
-       return 0;
 }
 
 /*
  * Try to switch to new modulation mode from MIMO2
  */
-static int rs_move_mimo2_to_other(struct iwl_priv *priv,
-                                struct iwl_lq_sta *lq_sta,
-                                struct ieee80211_conf *conf,
-                                struct ieee80211_sta *sta, int index)
+static void rs_move_mimo2_to_other(struct iwl_priv *priv,
+                                  struct iwl_lq_sta *lq_sta,
+                                  struct ieee80211_conf *conf,
+                                  struct ieee80211_sta *sta, int index)
 {
        s8 is_green = lq_sta->is_green;
        struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1917,7 +1913,7 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
                        break;
        }
        search_tbl->lq_type = LQ_NONE;
-       return 0;
+       return;
  out:
        lq_sta->search_better_tbl = 1;
        tbl->action++;
@@ -1926,17 +1922,15 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
        if (update_search_tbl_counter)
                search_tbl->action = tbl->action;
 
-       return 0;
-
 }
 
 /*
  * Try to switch to new modulation mode from MIMO3
  */
-static int rs_move_mimo3_to_other(struct iwl_priv *priv,
-                                struct iwl_lq_sta *lq_sta,
-                                struct ieee80211_conf *conf,
-                                struct ieee80211_sta *sta, int index)
+static void rs_move_mimo3_to_other(struct iwl_priv *priv,
+                                  struct iwl_lq_sta *lq_sta,
+                                  struct ieee80211_conf *conf,
+                                  struct ieee80211_sta *sta, int index)
 {
        s8 is_green = lq_sta->is_green;
        struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -2093,7 +2087,7 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
                        break;
        }
        search_tbl->lq_type = LQ_NONE;
-       return 0;
+       return;
  out:
        lq_sta->search_better_tbl = 1;
        tbl->action++;
@@ -2101,9 +2095,6 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
                tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
        if (update_search_tbl_counter)
                search_tbl->action = tbl->action;
-
-       return 0;
-
 }
 
 /*
index 5a91f5d6b1dcc5de325bb9465c995cebb096c710..15176981dc8f62c8754a6f939989fdc1863f0368 100644 (file)
@@ -123,9 +123,8 @@ const char *const iwl_dvm_cmd_strings[REPLY_MAX] = {
  *
  ******************************************************************************/
 
-static int iwlagn_rx_reply_error(struct iwl_priv *priv,
-                              struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_error(struct iwl_priv *priv,
+                                 struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_error_resp *err_resp = (void *)pkt->data;
@@ -136,11 +135,9 @@ static int iwlagn_rx_reply_error(struct iwl_priv *priv,
                err_resp->cmd_id,
                le16_to_cpu(err_resp->bad_cmd_seq_num),
                le32_to_cpu(err_resp->error_info));
-       return 0;
 }
 
-static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd)
+static void iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_csa_notification *csa = (void *)pkt->data;
@@ -152,7 +149,7 @@ static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
        struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
 
        if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
-               return 0;
+               return;
 
        if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
                rxon->channel = csa->channel;
@@ -165,13 +162,11 @@ static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
                        le16_to_cpu(csa->channel));
                iwl_chswitch_done(priv, false);
        }
-       return 0;
 }
 
 
-static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
-                                         struct iwl_rx_cmd_buffer *rxb,
-                                         struct iwl_device_cmd *cmd)
+static void iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
+                                            struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_spectrum_notification *report = (void *)pkt->data;
@@ -179,17 +174,15 @@ static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
        if (!report->state) {
                IWL_DEBUG_11H(priv,
                        "Spectrum Measure Notification: Start\n");
-               return 0;
+               return;
        }
 
        memcpy(&priv->measure_report, report, sizeof(*report));
        priv->measurement_status |= MEASUREMENT_READY;
-       return 0;
 }
 
-static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
-                                 struct iwl_rx_cmd_buffer *rxb,
-                                 struct iwl_device_cmd *cmd)
+static void iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
+                                    struct iwl_rx_cmd_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -197,24 +190,20 @@ static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
        IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
                     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
 #endif
-       return 0;
 }
 
-static int iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd)
+static void iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
+                                               struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u32 __maybe_unused len = iwl_rx_packet_len(pkt);
        IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
                        "notification for PM_DEBUG_STATISTIC_NOTIFIC:\n", len);
        iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len);
-       return 0;
 }
 
-static int iwlagn_rx_beacon_notif(struct iwl_priv *priv,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd)
+static void iwlagn_rx_beacon_notif(struct iwl_priv *priv,
+                                  struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwlagn_beacon_notif *beacon = (void *)pkt->data;
@@ -232,8 +221,6 @@ static int iwlagn_rx_beacon_notif(struct iwl_priv *priv,
 #endif
 
        priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-
-       return 0;
 }
 
 /**
@@ -448,9 +435,8 @@ iwlagn_accumulative_statistics(struct iwl_priv *priv,
 }
 #endif
 
-static int iwlagn_rx_statistics(struct iwl_priv *priv,
-                             struct iwl_rx_cmd_buffer *rxb,
-                             struct iwl_device_cmd *cmd)
+static void iwlagn_rx_statistics(struct iwl_priv *priv,
+                                struct iwl_rx_cmd_buffer *rxb)
 {
        unsigned long stamp = jiffies;
        const int reg_recalib_period = 60;
@@ -505,7 +491,7 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
                          len, sizeof(struct iwl_bt_notif_statistics),
                          sizeof(struct iwl_notif_statistics));
                spin_unlock(&priv->statistics.lock);
-               return 0;
+               return;
        }
 
        change = common->temperature != priv->statistics.common.temperature ||
@@ -550,13 +536,10 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
                priv->lib->temperature(priv);
 
        spin_unlock(&priv->statistics.lock);
-
-       return 0;
 }
 
-static int iwlagn_rx_reply_statistics(struct iwl_priv *priv,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_statistics(struct iwl_priv *priv,
+                                      struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_notif_statistics *stats = (void *)pkt->data;
@@ -572,15 +555,14 @@ static int iwlagn_rx_reply_statistics(struct iwl_priv *priv,
 #endif
                IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
        }
-       iwlagn_rx_statistics(priv, rxb, cmd);
-       return 0;
+
+       iwlagn_rx_statistics(priv, rxb);
 }
 
 /* Handle notification from uCode that card's power state is changing
  * due to software, hardware, or critical temperature RFKILL */
-static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd)
+static void iwlagn_rx_card_state_notif(struct iwl_priv *priv,
+                                      struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
@@ -627,12 +609,10 @@ static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
             test_bit(STATUS_RF_KILL_HW, &priv->status)))
                wiphy_rfkill_set_hw_state(priv->hw->wiphy,
                        test_bit(STATUS_RF_KILL_HW, &priv->status));
-       return 0;
 }
 
-static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
-                                      struct iwl_rx_cmd_buffer *rxb,
-                                      struct iwl_device_cmd *cmd)
+static void iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
+                                         struct iwl_rx_cmd_buffer *rxb)
 
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -649,14 +629,12 @@ static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
                if (!test_bit(STATUS_SCANNING, &priv->status))
                        iwl_init_sensitivity(priv);
        }
-       return 0;
 }
 
 /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
  * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
-static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
+                                  struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
@@ -664,7 +642,6 @@ static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
        priv->ampdu_ref++;
        memcpy(&priv->last_phy_res, pkt->data,
               sizeof(struct iwl_rx_phy_res));
-       return 0;
 }
 
 /*
@@ -890,9 +867,8 @@ static int iwlagn_calc_rssi(struct iwl_priv *priv,
 }
 
 /* Called for REPLY_RX_MPDU_CMD */
-static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
-                           struct iwl_rx_cmd_buffer *rxb,
-                           struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_rx(struct iwl_priv *priv,
+                              struct iwl_rx_cmd_buffer *rxb)
 {
        struct ieee80211_hdr *header;
        struct ieee80211_rx_status rx_status = {};
@@ -906,7 +882,7 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
 
        if (!priv->last_phy_res_valid) {
                IWL_ERR(priv, "MPDU frame without cached PHY data\n");
-               return 0;
+               return;
        }
        phy_res = &priv->last_phy_res;
        amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data;
@@ -919,14 +895,14 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
        if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
                IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
                                phy_res->cfg_phy_cnt);
-               return 0;
+               return;
        }
 
        if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
            !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
                IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
                                le32_to_cpu(rx_pkt_status));
-               return 0;
+               return;
        }
 
        /* This will be used in several places later */
@@ -998,12 +974,10 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
 
        iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
                                    rxb, &rx_status);
-       return 0;
 }
 
-static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
-                                     struct iwl_rx_cmd_buffer *rxb,
-                                     struct iwl_device_cmd *cmd)
+static void iwlagn_rx_noa_notification(struct iwl_priv *priv,
+                                      struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_wipan_noa_data *new_data, *old_data;
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -1041,8 +1015,6 @@ static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
 
        if (old_data)
                kfree_rcu(old_data, rcu_head);
-
-       return 0;
 }
 
 /**
@@ -1053,8 +1025,7 @@ static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
  */
 void iwl_setup_rx_handlers(struct iwl_priv *priv)
 {
-       int (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd);
+       void (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
 
        handlers = priv->rx_handlers;
 
@@ -1102,12 +1073,10 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
                iwlagn_bt_rx_handler_setup(priv);
 }
 
-int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
-                   struct iwl_device_cmd *cmd)
+void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
-       int err = 0;
 
        /*
         * Do the notification wait before RX handlers so
@@ -1121,12 +1090,11 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
         *   rx_handlers table.  See iwl_setup_rx_handlers() */
        if (priv->rx_handlers[pkt->hdr.cmd]) {
                priv->rx_handlers_stats[pkt->hdr.cmd]++;
-               err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd);
+               priv->rx_handlers[pkt->hdr.cmd](priv, rxb);
        } else {
                /* No handling needed */
                IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
                             iwl_dvm_get_cmd_string(pkt->hdr.cmd),
                             pkt->hdr.cmd);
        }
-       return err;
 }
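
Much of the iwlwifi half of this diff converts the dvm RX handlers from int
to void: as the deleted lines in this hunk show, the dispatcher only stored
the return value and passed it up, where it went unused. The simplified
dispatch reduces to (minimal form):

        void (*fn)(struct iwl_priv *, struct iwl_rx_cmd_buffer *);

        fn = priv->rx_handlers[pkt->hdr.cmd];
        if (fn) {
                priv->rx_handlers_stats[pkt->hdr.cmd]++;
                fn(priv, rxb);  /* no status to propagate anymore */
        }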
index ed50de6362ed1d5dcd56b45243ff0b140dcbafe5..85ceceb34fcca76907cadbae633b49b107acdd91 100644 (file)
@@ -1,6 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -123,7 +124,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
        __le32 old_filter = send->filter_flags;
        u8 old_dev_type = send->dev_type;
        int ret;
-       static const u8 deactivate_cmd[] = {
+       static const u16 deactivate_cmd[] = {
                REPLY_WIPAN_DEACTIVATION_COMPLETE
        };
 
index 43bef901e8f9a80a7c3a56f63a7d2da93fda7076..648159495bbcb7df94c54dd5858449f360c47074 100644 (file)
@@ -247,9 +247,8 @@ void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
 }
 
 /* Service response to REPLY_SCAN_CMD (0x80) */
-static int iwl_rx_reply_scan(struct iwl_priv *priv,
-                             struct iwl_rx_cmd_buffer *rxb,
-                             struct iwl_device_cmd *cmd)
+static void iwl_rx_reply_scan(struct iwl_priv *priv,
+                             struct iwl_rx_cmd_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -257,13 +256,11 @@ static int iwl_rx_reply_scan(struct iwl_priv *priv,
 
        IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
 #endif
-       return 0;
 }
 
 /* Service SCAN_START_NOTIFICATION (0x82) */
-static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd)
+static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
+                                   struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_scanstart_notification *notif = (void *)pkt->data;
@@ -277,14 +274,11 @@ static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
                       le32_to_cpu(notif->tsf_high),
                       le32_to_cpu(notif->tsf_low),
                       notif->status, notif->beacon_timer);
-
-       return 0;
 }
 
 /* Service SCAN_RESULTS_NOTIFICATION (0x83) */
-static int iwl_rx_scan_results_notif(struct iwl_priv *priv,
-                                     struct iwl_rx_cmd_buffer *rxb,
-                                     struct iwl_device_cmd *cmd)
+static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
+                                     struct iwl_rx_cmd_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -303,13 +297,11 @@ static int iwl_rx_scan_results_notif(struct iwl_priv *priv,
                       le32_to_cpu(notif->statistics[0]),
                       le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
 #endif
-       return 0;
 }
 
 /* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
-static int iwl_rx_scan_complete_notif(struct iwl_priv *priv,
-                                      struct iwl_rx_cmd_buffer *rxb,
-                                      struct iwl_device_cmd *cmd)
+static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
+                                      struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_scancomplete_notification *scan_notif = (void *)pkt->data;
@@ -356,7 +348,6 @@ static int iwl_rx_scan_complete_notif(struct iwl_priv *priv,
                queue_work(priv->workqueue,
                           &priv->bt_traffic_change_work);
        }
-       return 0;
 }
 
 void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
index 6ec86adbe4a1fcc9df9aaf0d8b73ca4b48ac2e9b..0fa67d3b72356737256bae9c4f7d838b38edfb68 100644 (file)
@@ -60,41 +60,28 @@ static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
        return 0;
 }
 
-static int iwl_process_add_sta_resp(struct iwl_priv *priv,
-                                   struct iwl_addsta_cmd *addsta,
-                                   struct iwl_rx_packet *pkt)
+static void iwl_process_add_sta_resp(struct iwl_priv *priv,
+                                    struct iwl_rx_packet *pkt)
 {
        struct iwl_add_sta_resp *add_sta_resp = (void *)pkt->data;
-       u8 sta_id = addsta->sta.sta_id;
-       int ret = -EIO;
 
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
-                       pkt->hdr.flags);
-               return ret;
-       }
-
-       IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
-                      sta_id);
+       IWL_DEBUG_INFO(priv, "Processing response for adding station\n");
 
        spin_lock_bh(&priv->sta_lock);
 
        switch (add_sta_resp->status) {
        case ADD_STA_SUCCESS_MSK:
                IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
-               ret = iwl_sta_ucode_activate(priv, sta_id);
                break;
        case ADD_STA_NO_ROOM_IN_TABLE:
-               IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
-                       sta_id);
+               IWL_ERR(priv, "Adding station failed, no room in table.\n");
                break;
        case ADD_STA_NO_BLOCK_ACK_RESOURCE:
-               IWL_ERR(priv, "Adding station %d failed, no block ack "
-                       "resource.\n", sta_id);
+               IWL_ERR(priv,
+                       "Adding station failed, no block ack resource.\n");
                break;
        case ADD_STA_MODIFY_NON_EXIST_STA:
-               IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
-                       sta_id);
+               IWL_ERR(priv, "Attempting to modify a non-existent station\n");
                break;
        default:
                IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
@@ -102,37 +89,14 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
                break;
        }
 
-       IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
-                      priv->stations[sta_id].sta.mode ==
-                      STA_CONTROL_MODIFY_MSK ?  "Modified" : "Added",
-                      sta_id, priv->stations[sta_id].sta.sta.addr);
-
-       /*
-        * XXX: The MAC address in the command buffer is often changed from
-        * the original sent to the device. That is, the MAC address
-        * written to the command buffer often is not the same MAC address
-        * read from the command buffer when the command returns. This
-        * issue has not yet been resolved and this debugging is left to
-        * observe the problem.
-        */
-       IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
-                      priv->stations[sta_id].sta.mode ==
-                      STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
-                      addsta->sta.addr);
        spin_unlock_bh(&priv->sta_lock);
-
-       return ret;
 }
 
-int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd)
+void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
-       if (!cmd)
-               return 0;
-
-       return iwl_process_add_sta_resp(priv, (void *)cmd->payload, pkt);
+       iwl_process_add_sta_resp(priv, pkt);
 }
 
 int iwl_send_add_sta(struct iwl_priv *priv,
@@ -146,6 +110,8 @@ int iwl_send_add_sta(struct iwl_priv *priv,
                .len = { sizeof(*sta), },
        };
        u8 sta_id __maybe_unused = sta->sta.sta_id;
+       struct iwl_rx_packet *pkt;
+       struct iwl_add_sta_resp *add_sta_resp;
 
        IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
                       sta_id, sta->sta.addr, flags & CMD_ASYNC ?  "a" : "");
@@ -159,16 +125,22 @@ int iwl_send_add_sta(struct iwl_priv *priv,
 
        if (ret || (flags & CMD_ASYNC))
                return ret;
-       /*else the command was successfully sent in SYNC mode, need to free
-        * the reply page */
 
-       iwl_free_resp(&cmd);
+       pkt = cmd.resp_pkt;
+       add_sta_resp = (void *)pkt->data;
 
-       if (cmd.handler_status)
-               IWL_ERR(priv, "%s - error in the CMD response %d\n", __func__,
-                       cmd.handler_status);
+       /* debug messages are printed in the handler */
+       if (add_sta_resp->status == ADD_STA_SUCCESS_MSK) {
+               spin_lock_bh(&priv->sta_lock);
+               ret = iwl_sta_ucode_activate(priv, sta_id);
+               spin_unlock_bh(&priv->sta_lock);
+       } else {
+               ret = -EIO;
+       }
 
-       return cmd.handler_status;
+       iwl_free_resp(&cmd);
+
+       return ret;
 }
 
 bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
@@ -452,6 +424,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
        struct iwl_rx_packet *pkt;
        int ret;
        struct iwl_rem_sta_cmd rm_sta_cmd;
+       struct iwl_rem_sta_resp *rem_sta_resp;
 
        struct iwl_host_cmd cmd = {
                .id = REPLY_REMOVE_STA,
@@ -471,29 +444,23 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
                return ret;
 
        pkt = cmd.resp_pkt;
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
-                         pkt->hdr.flags);
-               ret = -EIO;
-       }
+       rem_sta_resp = (void *)pkt->data;
 
-       if (!ret) {
-               struct iwl_rem_sta_resp *rem_sta_resp = (void *)pkt->data;
-               switch (rem_sta_resp->status) {
-               case REM_STA_SUCCESS_MSK:
-                       if (!temporary) {
-                               spin_lock_bh(&priv->sta_lock);
-                               iwl_sta_ucode_deactivate(priv, sta_id);
-                               spin_unlock_bh(&priv->sta_lock);
-                       }
-                       IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
-                       break;
-               default:
-                       ret = -EIO;
-                       IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
-                       break;
+       switch (rem_sta_resp->status) {
+       case REM_STA_SUCCESS_MSK:
+               if (!temporary) {
+                       spin_lock_bh(&priv->sta_lock);
+                       iwl_sta_ucode_deactivate(priv, sta_id);
+                       spin_unlock_bh(&priv->sta_lock);
                }
+               IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
+               break;
+       default:
+               ret = -EIO;
+               IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
+               break;
        }
+
        iwl_free_resp(&cmd);
 
        return ret;
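
The two hunks above show the pattern that replaces per-handler status returns: a synchronous sender sets CMD_WANT_SKB, inspects cmd.resp_pkt itself, and frees the reply page. A minimal sketch of that pattern, assuming only the host-command API visible in this patch (the command id and response layout below are placeholders, not taken verbatim from the diff):

    /* Hedged sketch of the synchronous-response pattern used above */
    struct iwl_host_cmd cmd = {
            .id = REPLY_REMOVE_STA,
            .flags = CMD_WANT_SKB,  /* keep the reply for the caller */
    };
    struct iwl_rx_packet *pkt;
    int ret;

    ret = iwl_dvm_send_cmd(priv, &cmd);
    if (ret)
            return ret;

    pkt = cmd.resp_pkt;             /* valid because of CMD_WANT_SKB */
    /* ... interpret (void *)pkt->data as the command's response ... */
    iwl_free_resp(&cmd);            /* the caller now frees the reply */
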
index 275df12a6045044cdee1ddeba1c8840caf491923..bddd19769035130dde04a589aa3d5d1cba0379d5 100644 (file)
@@ -1128,8 +1128,7 @@ static void iwl_check_abort_status(struct iwl_priv *priv,
        }
 }
 
-int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd)
+void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -1273,8 +1272,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
                skb = __skb_dequeue(&skbs);
                ieee80211_tx_status(priv->hw, skb);
        }
-
-       return 0;
 }
 
 /**
@@ -1283,9 +1280,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
  * Handles block-acknowledge notification from device, which reports success
  * of frames sent via aggregation.
  */
-int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
-                                  struct iwl_rx_cmd_buffer *rxb,
-                                  struct iwl_device_cmd *cmd)
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+                                  struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
@@ -1306,7 +1302,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
        if (scd_flow >= priv->cfg->base_params->num_of_queues) {
                IWL_ERR(priv,
                        "BUG_ON scd_flow is bigger than number of queues\n");
-               return 0;
+               return;
        }
 
        sta_id = ba_resp->sta_id;
@@ -1319,7 +1315,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
                if (unlikely(ba_resp->bitmap))
                        IWL_ERR(priv, "Received BA when not expected\n");
                spin_unlock_bh(&priv->sta_lock);
-               return 0;
+               return;
        }
 
        if (unlikely(scd_flow != agg->txq_id)) {
@@ -1333,7 +1329,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
                                    "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
                                    scd_flow, sta_id, tid, agg->txq_id);
                spin_unlock_bh(&priv->sta_lock);
-               return 0;
+               return;
        }
 
        __skb_queue_head_init(&reclaimed_skbs);
@@ -1413,6 +1409,4 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
                skb = __skb_dequeue(&reclaimed_skbs);
                ieee80211_tx_status(priv->hw, skb);
        }
-
-       return 0;
 }
index 5244e43bfafbc4617720097660ec693f5d30742f..931a8e4269ef16220a5c57db37a9175913ba3bc3 100644 (file)
@@ -3,6 +3,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -327,7 +328,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
        const struct fw_img *fw;
        int ret;
        enum iwl_ucode_type old_type;
-       static const u8 alive_cmd[] = { REPLY_ALIVE };
+       static const u16 alive_cmd[] = { REPLY_ALIVE };
 
        fw = iwl_get_ucode_image(priv, ucode_type);
        if (WARN_ON(!fw))
@@ -406,7 +407,7 @@ static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait,
 int iwl_run_init_ucode(struct iwl_priv *priv)
 {
        struct iwl_notification_wait calib_wait;
-       static const u8 calib_complete[] = {
+       static const u16 calib_complete[] = {
                CALIBRATION_RES_NOTIFICATION,
                CALIBRATION_COMPLETE_NOTIFICATION
        };
index cc35f796d406156286164410aa7d1adcd087c015..fa35da4edda26031e4aa9ea0d74292cabe479c26 100644 (file)
@@ -76,7 +76,7 @@
 #define IWL3165_UCODE_API_OK   13
 
 /* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN  10
+#define IWL7260_UCODE_API_MIN  12
 #define IWL3165_UCODE_API_MIN  13
 
 /* NVM versions */
index 72040cd0b9794e6790daaa7f887089692ccb1d5e..7caea69570d47a2485dc9cb607d408507904ed54 100644 (file)
@@ -75,7 +75,7 @@
 #define IWL8000_UCODE_API_OK   12
 
 /* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN  10
+#define IWL8000_UCODE_API_MIN  12
 
 /* NVM versions */
 #define IWL8000_NVM_VERSION            0x0a1d
@@ -154,6 +154,7 @@ static const struct iwl_tt_params iwl8000_tt_params = {
        .led_mode = IWL_LED_RF_STATE,                                   \
        .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000,           \
        .d0i3 = true,                                                   \
+       .features = NETIF_F_RXCSUM,                                     \
        .non_shared_ant = ANT_A,                                        \
        .dccm_offset = IWL8260_DCCM_OFFSET,                             \
        .dccm_len = IWL8260_DCCM_LEN,                                   \
index 08c14afeb1480aca04d61bd083795eb2aec401c2..939fa229c038a1dc6beb4288a4cb7364e1238a40 100644 (file)
@@ -297,6 +297,7 @@ struct iwl_pwr_tx_backoff {
  *     mode set
  * @d0i3: device uses d0i3 instead of d3
  * @nvm_hw_section_num: the ID of the HW NVM section
+ * @features: hw features, any combination of feature_whitelist
  * @pwr_tx_backoffs: translation table between power limits and backoffs
  * @max_rx_agg_size: max RX aggregation size of the ADDBA request/response
  * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response
@@ -348,6 +349,7 @@ struct iwl_cfg {
        bool no_power_up_nic_in_init;
        const char *default_nvm_file_B_step;
        const char *default_nvm_file_C_step;
+       netdev_features_t features;
        unsigned int max_rx_agg_size;
        bool disable_dummy_notification;
        unsigned int max_tx_agg_size;
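
The new @features field lets a device config advertise netdev features such as NETIF_F_RXCSUM. A hedged sketch of how an op mode might gate the feature on both the config and a firmware capability; fw_has_capa() is assumed to exist alongside the fw_has_api() helper used elsewhere in this merge:

    /* Sketch (assumption, not from this hunk): expose RX checksum
     * offload only when both config and firmware allow it. */
    if ((mvm->cfg->features & NETIF_F_RXCSUM) &&
        fw_has_capa(&mvm->fw->ucode_capa,
                    IWL_UCODE_TLV_CAPA_CSUM_SUPPORT))
            hw->netdev_features |= NETIF_F_RXCSUM;
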
index faa17f2e352adf72b740ffc37dfb03e747b003e1..fa716618735e05b03fdde50edd0205d4877b8020 100644 (file)
@@ -422,6 +422,7 @@ enum {
 
 /* DRAM INT TABLE */
 #define CSR_DRAM_INT_TBL_ENABLE                (1 << 31)
+#define CSR_DRAM_INIT_TBL_WRITE_POINTER        (1 << 28)
 #define CSR_DRAM_INIT_TBL_WRAP_CHECK   (1 << 27)
 
 /*
index 948ce0802fa7ceae995d0257a6a3f8dace6ccf14..eb4b99a1c8cd432e3cd3a3febf0b4b58f8af26ef 100644 (file)
@@ -36,7 +36,7 @@
 TRACE_EVENT(iwlwifi_dev_hcmd,
        TP_PROTO(const struct device *dev,
                 struct iwl_host_cmd *cmd, u16 total_size,
-                struct iwl_cmd_header *hdr),
+                struct iwl_cmd_header_wide *hdr),
        TP_ARGS(dev, cmd, total_size, hdr),
        TP_STRUCT__entry(
                DEV_ENTRY
@@ -44,11 +44,14 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
                __field(u32, flags)
        ),
        TP_fast_assign(
-               int i, offset = sizeof(*hdr);
+               int i, offset = sizeof(struct iwl_cmd_header);
+
+               if (hdr->group_id)
+                       offset = sizeof(struct iwl_cmd_header_wide);
 
                DEV_ASSIGN;
                __entry->flags = cmd->flags;
-               memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
+               memcpy(__get_dynamic_array(hcmd), hdr, offset);
 
                for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                        if (!cmd->len[i])
@@ -58,8 +61,9 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
                        offset += cmd->len[i];
                }
        ),
-       TP_printk("[%s] hcmd %#.2x (%ssync)",
-                 __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[0],
+       TP_printk("[%s] hcmd %#.2x.%#.2x (%ssync)",
+                 __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[1],
+                 ((u8 *)__get_dynamic_array(hcmd))[0],
                  __entry->flags & CMD_ASYNC ? "a" : "")
 );
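
The TP_fast_assign above copies only as much header as is actually present, keyed on group_id. The same decision written outside the tracing macros, as a sketch (the copy destination is a placeholder):

    /* group_id == 0 means the legacy narrow header is in use */
    size_t hdr_len = hdr->group_id ? sizeof(struct iwl_cmd_header_wide)
                                   : sizeof(struct iwl_cmd_header);

    memcpy(copy, hdr, hdr_len);     /* payload chunks are appended after */
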
 
index d56064861a9c353dfb9fcf1720e1abde6c3fcf9d..d45dc021cda2c0715b8d7e740ff90b46589ae141 100644 (file)
@@ -438,6 +438,12 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
 #define RX_QUEUE_MASK                         255
 #define RX_QUEUE_SIZE_LOG                     8
 
+/*
+ * RX buffer pool constants
+ */
+#define RX_FREE_BUFFERS 64
+#define RX_LOW_WATERMARK 8
+
 /**
  * struct iwl_rb_status - receive buffer status
  *     host memory mapped FH registers
index a9b5ae4ebec021277efc454730d03b562add3210..926e4568d36c8eac0636340641769f7a6d6d42d9 100644 (file)
@@ -247,9 +247,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
  * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
  * @IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR
  * @IWL_UCODE_TLV_API_TX_POWER_DEV: new API for tx power.
- * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
- *     regardless of the band or the number of the probes. FW will calculate
- *     the actual dwell time.
+ * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header
  * @IWL_UCODE_TLV_API_SCD_CFG: This firmware can configure the scheduler
  *     through the dedicated host command.
  * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
@@ -266,7 +264,7 @@ enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_WIFI_MCC_UPDATE       = (__force iwl_ucode_tlv_api_t)9,
        IWL_UCODE_TLV_API_HDC_PHASE_0           = (__force iwl_ucode_tlv_api_t)10,
        IWL_UCODE_TLV_API_TX_POWER_DEV          = (__force iwl_ucode_tlv_api_t)11,
-       IWL_UCODE_TLV_API_BASIC_DWELL           = (__force iwl_ucode_tlv_api_t)13,
+       IWL_UCODE_TLV_API_WIDE_CMD_HDR          = (__force iwl_ucode_tlv_api_t)14,
        IWL_UCODE_TLV_API_SCD_CFG               = (__force iwl_ucode_tlv_api_t)15,
        IWL_UCODE_TLV_API_SINGLE_SCAN_EBS       = (__force iwl_ucode_tlv_api_t)16,
        IWL_UCODE_TLV_API_ASYNC_DTM             = (__force iwl_ucode_tlv_api_t)17,
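
Drivers test these API bits with fw_has_api(), as the coex code later in this merge does. A one-line sketch of how the new wide-header bit would plausibly feed the wide_cmd_header transport flag added below (the assignment site is an assumption):

    /* Sketch: gate the wide command header on the new TLV API bit */
    trans_cfg.wide_cmd_header =
            fw_has_api(&mvm->fw->ucode_capa,
                       IWL_UCODE_TLV_API_WIDE_CMD_HDR);
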
@@ -284,6 +282,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
  * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
  * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
  * @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer
+ * @IWL_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM)
  * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
  * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
  *     tx power value into TPC Report action frame and Link Measurement Report
@@ -298,6 +297,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
  * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
  * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
  * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
+ * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
  * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
  * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
  * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
@@ -311,6 +311,7 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_LAR_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)1,
        IWL_UCODE_TLV_CAPA_UMAC_SCAN                    = (__force iwl_ucode_tlv_capa_t)2,
        IWL_UCODE_TLV_CAPA_BEAMFORMER                   = (__force iwl_ucode_tlv_capa_t)3,
+       IWL_UCODE_TLV_CAPA_TOF_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)5,
        IWL_UCODE_TLV_CAPA_TDLS_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)6,
        IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT    = (__force iwl_ucode_tlv_capa_t)8,
        IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT      = (__force iwl_ucode_tlv_capa_t)9,
@@ -320,6 +321,7 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH          = (__force iwl_ucode_tlv_capa_t)13,
        IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT              = (__force iwl_ucode_tlv_capa_t)18,
        IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT         = (__force iwl_ucode_tlv_capa_t)19,
+       IWL_UCODE_TLV_CAPA_CSUM_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)21,
        IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS           = (__force iwl_ucode_tlv_capa_t)22,
        IWL_UCODE_TLV_CAPA_BT_COEX_PLCR                 = (__force iwl_ucode_tlv_capa_t)28,
        IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC                = (__force iwl_ucode_tlv_capa_t)29,
@@ -412,6 +414,12 @@ enum iwl_fw_dbg_reg_operator {
        PRPH_ASSIGN,
        PRPH_SETBIT,
        PRPH_CLEARBIT,
+
+       INDIRECT_ASSIGN,
+       INDIRECT_SETBIT,
+       INDIRECT_CLEARBIT,
+
+       PRPH_BLOCKBIT,
 };
 
 /**
index b5bc959b1dfe0bed54cf9423c6f01c23e332062e..6caf2affbbb52d5d0fc356db0f4b3d1360f4be8d 100644 (file)
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -98,7 +99,8 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
                                continue;
 
                        for (i = 0; i < w->n_cmds; i++) {
-                               if (w->cmds[i] == pkt->hdr.cmd) {
+                               if (w->cmds[i] ==
+                                   WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
                                        found = true;
                                        break;
                                }
@@ -136,7 +138,7 @@ IWL_EXPORT_SYMBOL(iwl_abort_notification_waits);
 void
 iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
                           struct iwl_notification_wait *wait_entry,
-                          const u8 *cmds, int n_cmds,
+                          const u16 *cmds, int n_cmds,
                           bool (*fn)(struct iwl_notif_wait_data *notif_wait,
                                      struct iwl_rx_packet *pkt, void *data),
                           void *fn_data)
@@ -147,7 +149,7 @@ iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
        wait_entry->fn = fn;
        wait_entry->fn_data = fn_data;
        wait_entry->n_cmds = n_cmds;
-       memcpy(wait_entry->cmds, cmds, n_cmds);
+       memcpy(wait_entry->cmds, cmds, n_cmds * sizeof(u16));
        wait_entry->triggered = false;
        wait_entry->aborted = false;
 
index 95af97a6c2cfa3886b6aa4fa5f1bb1213854cdf7..dbe8234521defb540ca545d3729b30e952bb4453 100644 (file)
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -105,7 +106,7 @@ struct iwl_notification_wait {
                   struct iwl_rx_packet *pkt, void *data);
        void *fn_data;
 
-       u8 cmds[MAX_NOTIF_CMDS];
+       u16 cmds[MAX_NOTIF_CMDS];
        u8 n_cmds;
        bool triggered, aborted;
 };
@@ -121,7 +122,7 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data);
 void __acquires(wait_entry)
 iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
                           struct iwl_notification_wait *wait_entry,
-                          const u8 *cmds, int n_cmds,
+                          const u16 *cmds, int n_cmds,
                           bool (*fn)(struct iwl_notif_wait_data *notif_data,
                                      struct iwl_rx_packet *pkt, void *data),
                           void *fn_data);
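
With cmds widened to u16, a wait entry can be keyed on a full group+opcode pair. A sketch mirroring the mvm d3 code later in this section; grouped ids would be written with WIDE_ID():

    /* Legacy (group 0) opcodes still fit in the u16 entries;
     * grouped commands are expressed as WIDE_ID(group, opcode). */
    static const u16 d3_notif[] = { D3_CONFIG_CMD };
    struct iwl_notification_wait wait_d3;

    iwl_init_notification_wait(&mvm->notif_wait, &wait_d3,
                               d3_notif, ARRAY_SIZE(d3_notif),
                               NULL, NULL);   /* no filter callback */
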
index 80fefe7d7b8cb3b46581b32f299fed425e878980..3b8e85e51002560a0a860db0999ec947c29fb0d2 100644 (file)
@@ -540,13 +540,11 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
                hw_addr = (const u8 *)(mac_override +
                                 MAC_ADDRESS_OVERRIDE_FAMILY_8000);
 
-               /* The byte order is little endian 16 bit, meaning 214365 */
-               data->hw_addr[0] = hw_addr[1];
-               data->hw_addr[1] = hw_addr[0];
-               data->hw_addr[2] = hw_addr[3];
-               data->hw_addr[3] = hw_addr[2];
-               data->hw_addr[4] = hw_addr[5];
-               data->hw_addr[5] = hw_addr[4];
+               /*
+                * Store the MAC address from the MAO (MAC address
+                * override) section; no byte swapping is required there.
+                */
+               memcpy(data->hw_addr, hw_addr, ETH_ALEN);
 
                /*
                 * Force the use of the OTP MAC address in case of reserved MAC
index ce1cdd7604e8a156cf7356466b9232bdc176d9db..71b450adbda0d73d26b08200260e37fee0c76129 100644 (file)
@@ -148,8 +148,7 @@ struct iwl_op_mode_ops {
                                     const struct iwl_fw *fw,
                                     struct dentry *dbgfs_dir);
        void (*stop)(struct iwl_op_mode *op_mode);
-       int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
-                 struct iwl_device_cmd *cmd);
+       void (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb);
        void (*napi_add)(struct iwl_op_mode *op_mode,
                         struct napi_struct *napi,
                         struct net_device *napi_dev,
@@ -188,11 +187,10 @@ static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
        op_mode->ops->stop(op_mode);
 }
 
-static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
-                                 struct iwl_rx_cmd_buffer *rxb,
-                                 struct iwl_device_cmd *cmd)
+static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode,
+                                 struct iwl_rx_cmd_buffer *rxb)
 {
-       return op_mode->ops->rx(op_mode, rxb, cmd);
+       return op_mode->ops->rx(op_mode, rxb);
 }
 
 static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
index 5af1c776d2d4381fdd01e0044c4eeb5108236a18..cd98b9f45415b107cdbec91a3b09bd3449ca0a42 100644 (file)
 #define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS     (16)
 #define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK     (0x007F0000)
 #define SCD_GP_CTRL_ENABLE_31_QUEUES           BIT(0)
+#define SCD_GP_CTRL_AUTO_ACTIVE_MODE           BIT(18)
 
 /* Context Data */
 #define SCD_CONTEXT_MEM_LOWER_BOUND    (SCD_MEM_LOWER_BOUND + 0x600)
 
 /*********************** END TX SCHEDULER *************************************/
 
+/* TCP checksum offload */
+#define RX_EN_CSUM             (0x00a00d88)
+
 /* Oscillator clock */
 #define OSC_CLK                                (0xa04068)
 #define OSC_CLK_FORCE_CONTROL          (0x8)
index 87a230a7f4b605b8b6db80b69b3db7bc95b59e2f..2f79e54823c476bff7e5ad0a8501f7966b4803ab 100644 (file)
 #define INDEX_TO_SEQ(i)        ((i) & 0xff)
 #define SEQ_RX_FRAME   cpu_to_le16(0x8000)
 
+/*
+ * These functions extract specific pieces of information from the id
+ * field in the iwl_host_cmd struct, which encodes the command opcode,
+ * the group id and the version of the command; iwl_cmd_id() performs
+ * the inverse operation, building such an id from its parts.
+ */
+static inline u8 iwl_cmd_opcode(u32 cmdid)
+{
+       return cmdid & 0xFF;
+}
+
+static inline u8 iwl_cmd_groupid(u32 cmdid)
+{
+       return ((cmdid & 0xFF00) >> 8);
+}
+
+static inline u8 iwl_cmd_version(u32 cmdid)
+{
+       return ((cmdid & 0xFF0000) >> 16);
+}
+
+static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
+{
+       return opcode + (groupid << 8) + (version << 16);
+}
+
+/* make u16 wide id out of u8 group and opcode */
+#define WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))
+
+/* due to the conversion, this group is special; new groups
+ * should be defined in the appropriate fw-api header files
+ */
+#define IWL_ALWAYS_LONG_GROUP  1
+
 /**
  * struct iwl_cmd_header
  *
  */
 struct iwl_cmd_header {
        u8 cmd;         /* Command ID:  REPLY_RXON, etc. */
-       u8 flags;       /* 0:5 reserved, 6 abort, 7 internal */
+       u8 group_id;
        /*
         * The driver sets up the sequence number to values of its choosing.
         * uCode does not use this value, but passes it back to the driver
@@ -154,9 +188,22 @@ struct iwl_cmd_header {
        __le16 sequence;
 } __packed;
 
-/* iwl_cmd_header flags value */
-#define IWL_CMD_FAILED_MSK 0x40
-
+/**
+ * struct iwl_cmd_header_wide
+ *
+ * This header format appears at the beginning of each command sent from the
+ * driver and of each response/notification received from the uCode.
+ * It is the wide version, which carries extra information about the
+ * command, such as its length, version and group.
+ */
+struct iwl_cmd_header_wide {
+       u8 cmd;
+       u8 group_id;
+       __le16 sequence;
+       __le16 length;
+       u8 reserved;
+       u8 version;
+} __packed;
 
 #define FH_RSCSR_FRAME_SIZE_MSK                0x00003FFF      /* bits 0-13 */
 #define FH_RSCSR_FRAME_INVALID         0x55550000
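
A round-trip example for the id helpers introduced above (the opcode value is illustrative only):

    u32 id = iwl_cmd_id(0x1c, IWL_ALWAYS_LONG_GROUP, 0); /* 0x0000011c */
    u8 opcode = iwl_cmd_opcode(id);                      /* 0x1c */
    u8 group = iwl_cmd_groupid(id);                      /* 0x01 */
    u16 wide = WIDE_ID(group, opcode);                   /* 0x011c */
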
@@ -222,8 +269,18 @@ enum CMD_MODE {
  * aren't fully copied and use other TFD space.
  */
 struct iwl_device_cmd {
-       struct iwl_cmd_header hdr;      /* uCode API */
-       u8 payload[DEF_CMD_PAYLOAD_SIZE];
+       union {
+               struct {
+                       struct iwl_cmd_header hdr;      /* uCode API */
+                       u8 payload[DEF_CMD_PAYLOAD_SIZE];
+               };
+               struct {
+                       struct iwl_cmd_header_wide hdr_wide;
+                       u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
+                                       sizeof(struct iwl_cmd_header_wide) +
+                                       sizeof(struct iwl_cmd_header)];
+               };
+       };
 } __packed;
 
 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
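
Both arms of the union above occupy the same space, since payload_wide is shrunk by exactly the header growth; inside any function this can be checked at compile time with the kernel's BUILD_BUG_ON():

    BUILD_BUG_ON(sizeof(struct iwl_cmd_header_wide) +
                 sizeof(((struct iwl_device_cmd *)0)->payload_wide) !=
                 sizeof(struct iwl_cmd_header) +
                 sizeof(((struct iwl_device_cmd *)0)->payload));
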
@@ -261,24 +318,22 @@ enum iwl_hcmd_dataflag {
  * @resp_pkt: response packet, if %CMD_WANT_SKB was set
  * @_rx_page_order: (internally used to free response packet)
  * @_rx_page_addr: (internally used to free response packet)
- * @handler_status: return value of the handler of the command
- *     (put in setup_rx_handlers) - valid for SYNC mode only
  * @flags: can be CMD_*
  * @len: array of the lengths of the chunks in data
  * @dataflags: IWL_HCMD_DFL_*
- * @id: id of the host command
+ * @id: id of the host command; for wide commands it also encodes the
+ *     group and version
  */
 struct iwl_host_cmd {
        const void *data[IWL_MAX_CMD_TBS_PER_TFD];
        struct iwl_rx_packet *resp_pkt;
        unsigned long _rx_page_addr;
        u32 _rx_page_order;
-       int handler_status;
 
        u32 flags;
+       u32 id;
        u16 len[IWL_MAX_CMD_TBS_PER_TFD];
        u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
-       u8 id;
 };
 
 static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
@@ -379,6 +434,7 @@ enum iwl_trans_status {
  * @bc_table_dword: set to true if the BC table expects the byte count to be
  *     in DWORD (as opposed to bytes)
  * @scd_set_active: should the transport configure the SCD for HCMD queue
+ * @wide_cmd_header: firmware supports wide host command header
  * @command_names: array of command names, must be 256 entries
  *     (one for each command); for debugging only
  * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
@@ -396,6 +452,7 @@ struct iwl_trans_config {
        bool rx_buf_size_8k;
        bool bc_table_dword;
        bool scd_set_active;
+       bool wide_cmd_header;
        const char *const *command_names;
 
        u32 sdio_adma_addr;
@@ -544,7 +601,7 @@ struct iwl_trans_ops {
                              u32 value);
        void (*ref)(struct iwl_trans *trans);
        void (*unref)(struct iwl_trans *trans);
-       void (*suspend)(struct iwl_trans *trans);
+       int  (*suspend)(struct iwl_trans *trans);
        void (*resume)(struct iwl_trans *trans);
 
        struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans);
@@ -753,10 +810,12 @@ static inline void iwl_trans_unref(struct iwl_trans *trans)
                trans->ops->unref(trans);
 }
 
-static inline void iwl_trans_suspend(struct iwl_trans *trans)
+static inline int iwl_trans_suspend(struct iwl_trans *trans)
 {
-       if (trans->ops->suspend)
-               trans->ops->suspend(trans);
+       if (!trans->ops->suspend)
+               return 0;
+
+       return trans->ops->suspend(trans);
 }
 
 static inline void iwl_trans_resume(struct iwl_trans *trans)
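
Since suspend now returns int, transport errors propagate to the caller instead of being silently dropped; the mvm suspend path later in this section does exactly this:

    ret = iwl_trans_suspend(mvm->trans);
    if (ret)
            return ret;     /* abort the suspend on transport failure */
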
index 2d7c3ea3c4f8ba4db22fde31245ac43051f52a53..8c2c3d13b09233a3e944ab2ee3e502dde0415312 100644 (file)
@@ -6,6 +6,7 @@ iwlmvm-y += power.o coex.o coex_legacy.o
 iwlmvm-y += tt.o offloading.o tdls.o
 iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
 iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
+iwlmvm-y += tof.o
 iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
 
 ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
index b4737e296c927582063409917951a210e8ffa74e..e290ac67d97564bd0e85898d175fa4c12f11e6ed 100644 (file)
@@ -725,15 +725,17 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
        }
 }
 
-int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
-                            struct iwl_rx_cmd_buffer *rxb,
-                            struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+                             struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-               return iwl_mvm_rx_bt_coex_notif_old(mvm, rxb, dev_cmd);
+       if (!fw_has_api(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+               iwl_mvm_rx_bt_coex_notif_old(mvm, rxb);
+               return;
+       }
 
        IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
        IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
@@ -748,12 +750,6 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
        memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
 
        iwl_mvm_bt_coex_notif_handle(mvm);
-
-       /*
-        * This is an async handler for a notification, returning anything other
-        * than 0 doesn't make sense even if HCMD failed.
-        */
-       return 0;
 }
 
 void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -947,9 +943,8 @@ void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
        iwl_mvm_bt_coex_notif_handle(mvm);
 }
 
-int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
-                                 struct iwl_rx_cmd_buffer *rxb,
-                                 struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+                                  struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u32 ant_isolation = le32_to_cpup((void *)pkt->data);
@@ -957,20 +952,23 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
        u8 __maybe_unused lower_bound, upper_bound;
        u8 lut;
 
-       if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-               return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd);
+       if (!fw_has_api(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+               iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb);
+               return;
+       }
 
        if (!iwl_mvm_bt_is_plcr_supported(mvm))
-               return 0;
+               return;
 
        lockdep_assert_held(&mvm->mutex);
 
        /* Ignore updates if we are in force mode */
        if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
-               return 0;
+               return;
 
        if (ant_isolation ==  mvm->last_ant_isol)
-               return 0;
+               return;
 
        for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
                if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
@@ -989,7 +987,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
        mvm->last_ant_isol = ant_isolation;
 
        if (mvm->last_corun_lut == lut)
-               return 0;
+               return;
 
        mvm->last_corun_lut = lut;
 
@@ -1000,6 +998,8 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
        memcpy(&cmd.corun_lut40, antenna_coupling_ranges[lut].lut20,
               sizeof(cmd.corun_lut40));
 
-       return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0,
-                                   sizeof(cmd), &cmd);
+       if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0,
+                                sizeof(cmd), &cmd))
+               IWL_ERR(mvm,
+                       "failed to send BT_COEX_UPDATE_CORUN_LUT command\n");
 }
index 6ac6de2af9779982231d1efb4c6186fad4442f5d..61c07b05fcaa67cb302f959c4f3f2ec6c919a379 100644 (file)
@@ -1058,9 +1058,8 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
                IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
 }
 
-int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
-                                struct iwl_rx_cmd_buffer *rxb,
-                                struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
+                                 struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data;
@@ -1083,12 +1082,6 @@ int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
        memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old));
 
        iwl_mvm_bt_coex_notif_handle(mvm);
-
-       /*
-        * This is an async handler for a notification, returning anything other
-        * than 0 doesn't make sense even if HCMD failed.
-        */
-       return 0;
 }
 
 static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
@@ -1250,14 +1243,12 @@ void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm)
        iwl_mvm_bt_coex_notif_handle(mvm);
 }
 
-int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
-                                     struct iwl_rx_cmd_buffer *rxb,
-                                     struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
+                                      struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u32 ant_isolation = le32_to_cpup((void *)pkt->data);
        u8 __maybe_unused lower_bound, upper_bound;
-       int ret;
        u8 lut;
 
        struct iwl_bt_coex_cmd_old *bt_cmd;
@@ -1268,16 +1259,16 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
        };
 
        if (!iwl_mvm_bt_is_plcr_supported(mvm))
-               return 0;
+               return;
 
        lockdep_assert_held(&mvm->mutex);
 
        /* Ignore updates if we are in force mode */
        if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
-               return 0;
+               return;
 
        if (ant_isolation ==  mvm->last_ant_isol)
-               return 0;
+               return;
 
        for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
                if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
@@ -1296,13 +1287,13 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
        mvm->last_ant_isol = ant_isolation;
 
        if (mvm->last_corun_lut == lut)
-               return 0;
+               return;
 
        mvm->last_corun_lut = lut;
 
        bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
        if (!bt_cmd)
-               return 0;
+               return;
        cmd.data[0] = bt_cmd;
 
        bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
@@ -1317,8 +1308,8 @@ int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
        memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
               sizeof(bt_cmd->bt4_corun_lut40));
 
-       ret = iwl_mvm_send_cmd(mvm, &cmd);
+       if (iwl_mvm_send_cmd(mvm, &cmd))
+               IWL_ERR(mvm, "failed to send BT_CONFIG command\n");
 
        kfree(bt_cmd);
-       return ret;
 }
index beba375489f1cc53b2e4a97397c4159732083536..b8ee3121fbd23be4336a4a7fafffc72f37c46b06 100644 (file)
 #define IWL_MVM_QUOTA_THRESHOLD                        4
 #define IWL_MVM_RS_RSSI_BASED_INIT_RATE         0
 #define IWL_MVM_RS_DISABLE_P2P_MIMO            0
+#define IWL_MVM_TOF_IS_RESPONDER               0
 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE    1
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE      2
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW   1
index 4165d104e4c379dde727f01d585904dec6030deb..04264e417c1c644e2b362e9bf29760489cfcae4f 100644 (file)
@@ -1145,7 +1145,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm)
 {
        struct iwl_notification_wait wait_d3;
-       static const u8 d3_notif[] = { D3_CONFIG_CMD };
+       static const u16 d3_notif[] = { D3_CONFIG_CMD };
        int ret;
 
        iwl_init_notification_wait(&mvm->notif_wait, &wait_d3,
@@ -1168,13 +1168,17 @@ remove_notif:
 int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       int ret;
+
+       ret = iwl_trans_suspend(mvm->trans);
+       if (ret)
+               return ret;
 
-       iwl_trans_suspend(mvm->trans);
        mvm->trans->wowlan_d0i3 = wowlan->any;
        if (mvm->trans->wowlan_d0i3) {
                /* 'any' trigger means d0i3 usage */
                if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
-                       int ret = iwl_mvm_enter_d0i3_sync(mvm);
+                       ret = iwl_mvm_enter_d0i3_sync(mvm);
 
                        if (ret)
                                return ret;
@@ -1183,6 +1187,9 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
                mutex_lock(&mvm->d0i3_suspend_mutex);
                __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
                mutex_unlock(&mvm->d0i3_suspend_mutex);
+
+               iwl_trans_d3_suspend(mvm->trans, false);
+
                return 0;
        }
 
@@ -1935,28 +1942,59 @@ out:
        return 1;
 }
 
-int iwl_mvm_resume(struct ieee80211_hw *hw)
+static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
 {
-       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       iwl_trans_resume(mvm->trans);
+
+       return __iwl_mvm_resume(mvm, false);
+}
+
+static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm)
+{
+       bool exit_now;
+       enum iwl_d3_status d3_status;
+
+       iwl_trans_d3_resume(mvm->trans, &d3_status, false);
+
+       /*
+        * make sure to clear D0I3_DEFER_WAKEUP before
+        * calling iwl_trans_resume(), which might wait
+        * for d0i3 exit completion.
+        */
+       mutex_lock(&mvm->d0i3_suspend_mutex);
+       __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+       exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
+                                       &mvm->d0i3_suspend_flags);
+       mutex_unlock(&mvm->d0i3_suspend_mutex);
+       if (exit_now) {
+               IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
+               _iwl_mvm_exit_d0i3(mvm);
+       }
 
        iwl_trans_resume(mvm->trans);
 
-       if (mvm->hw->wiphy->wowlan_config->any) {
-               /* 'any' trigger means d0i3 usage */
-               if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
-                       int ret = iwl_mvm_exit_d0i3(hw->priv);
+       if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
+               int ret = iwl_mvm_exit_d0i3(mvm->hw->priv);
 
-                       if (ret)
-                               return ret;
-                       /*
-                        * d0i3 exit will be deferred until reconfig_complete.
-                        * make sure there we are out of d0i3.
-                        */
-               }
-               return 0;
+               if (ret)
+                       return ret;
+               /*
+                * d0i3 exit will be deferred until reconfig_complete;
+                * make sure we are out of d0i3 by then.
+                */
        }
+       return 0;
+}
 
-       return __iwl_mvm_resume(mvm, false);
+int iwl_mvm_resume(struct ieee80211_hw *hw)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+       /* 'any' trigger means d0i3 was used */
+       if (hw->wiphy->wowlan_config->any)
+               return iwl_mvm_resume_d0i3(mvm);
+       else
+               return iwl_mvm_resume_d3(mvm);
 }
 
 void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
index 5c8a65de0e775a2327d392f617c4433bdd096f6d..ddb1c844827b9f1b8bdbd8223a20f6f85750e4cb 100644 (file)
@@ -63,6 +63,7 @@
  *
  *****************************************************************************/
 #include "mvm.h"
+#include "fw-api-tof.h"
 #include "debugfs.h"
 
 static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
@@ -497,6 +498,731 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
+static inline char *iwl_dbgfs_is_match(char *name, char *buf)
+{
+       int len = strlen(name);
+
+       return !strncmp(name, buf, len) ? buf + len : NULL;
+}
+
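
A usage sketch for the helper above: given a debugfs write of "tof_disabled=1", it returns a pointer just past the '=' so the value can be parsed in place, or NULL when the prefix does not match:

    char *data = iwl_dbgfs_is_match("tof_disabled=", buf);

    if (data) {
            u32 value;

            if (!kstrtou32(data, 10, &value))
                    mvm->tof_data.tof_cfg.tof_disabled = value;
    }
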
+static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif,
+                                         char *buf,
+                                         size_t count, loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       int value, ret = -EINVAL;
+       char *data;
+
+       mutex_lock(&mvm->mutex);
+
+       data = iwl_dbgfs_is_match("tof_disabled=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.tof_cfg.tof_disabled = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("one_sided_disabled=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.tof_cfg.one_sided_disabled = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("is_debug_mode=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.tof_cfg.is_debug_mode = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("is_buf=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.tof_cfg.is_buf_required = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("send_tof_cfg=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0 && value) {
+                       ret = iwl_mvm_tof_config_cmd(mvm);
+                       goto out;
+               }
+       }
+
+out:
+       mutex_unlock(&mvm->mutex);
+
+       return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_enable_read(struct file *file,
+                                        char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       char buf[256];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+       struct iwl_tof_config_cmd *cmd;
+
+       cmd = &mvm->tof_data.tof_cfg;
+
+       mutex_lock(&mvm->mutex);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n",
+                        cmd->tof_disabled);
+       pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n",
+                        cmd->one_sided_disabled);
+       pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n",
+                        cmd->is_debug_mode);
+       pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n",
+                        cmd->is_buf_required);
+
+       mutex_unlock(&mvm->mutex);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif,
+                                                   char *buf,
+                                                   size_t count, loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       int value, ret = 0;
+       char *data;
+
+       mutex_lock(&mvm->mutex);
+
+       data = iwl_dbgfs_is_match("burst_period=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (!ret)
+                       mvm->tof_data.responder_cfg.burst_period =
+                                                       cpu_to_le16(value);
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.min_delta_ftm = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("burst_duration=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.burst_duration = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("num_of_burst_exp=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.num_of_burst_exp = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("abort_responder=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.abort_responder = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("get_ch_est=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.get_ch_est = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("recv_sta_req_params=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.recv_sta_req_params = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("channel_num=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.channel_num = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("bandwidth=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.bandwidth = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("rate=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.rate = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("bssid=", buf);
+       if (data) {
+               u8 *mac = mvm->tof_data.responder_cfg.bssid;
+
+               if (!mac_pton(data, mac)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+       }
+
+       data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.tsf_timer_offset_msecs =
+                                                       cpu_to_le16(value);
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("toa_offset=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.toa_offset =
+                                                       cpu_to_le16(value);
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("ctrl_ch_position=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.ctrl_ch_position = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("ftm_per_burst=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.ftm_per_burst = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("asap_mode=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.responder_cfg.asap_mode = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("send_responder_cfg=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0 && value) {
+                       ret = iwl_mvm_tof_responder_cmd(mvm, vif);
+                       goto out;
+               }
+       }
+
+out:
+       mutex_unlock(&mvm->mutex);
+
+       return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_responder_params_read(struct file *file,
+                                                  char __user *user_buf,
+                                                  size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       char buf[256];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+       struct iwl_tof_responder_config_cmd *cmd;
+
+       cmd = &mvm->tof_data.responder_cfg;
+
+       mutex_lock(&mvm->mutex);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n",
+                        le16_to_cpu(cmd->burst_period));
+       pos += scnprintf(buf + pos, bufsz - pos, "burst_duration = %d\n",
+                        cmd->burst_duration);
+       pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n",
+                        cmd->bandwidth);
+       pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n",
+                        cmd->channel_num);
+       pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n",
+                        cmd->ctrl_ch_position);
+       pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n",
+                        cmd->bssid);
+       pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n",
+                        cmd->min_delta_ftm);
+       pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n",
+                        cmd->num_of_burst_exp);
+       pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate);
+       pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n",
+                        cmd->abort_responder);
+       pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n",
+                        cmd->get_ch_est);
+       pos += scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n",
+                        cmd->recv_sta_req_params);
+       pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n",
+                        cmd->ftm_per_burst);
+       pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n",
+                        cmd->ftm_resp_ts_avail);
+       pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n",
+                        cmd->asap_mode);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "tsf_timer_offset_msecs = %d\n",
+                        le16_to_cpu(cmd->tsf_timer_offset_msecs));
+       pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n",
+                        le16_to_cpu(cmd->toa_offset));
+
+       mutex_unlock(&mvm->mutex);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
+                                                char *buf, size_t count,
+                                                loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       int value, ret = 0;
+       char *data;
+
+       mutex_lock(&mvm->mutex);
+
+       data = iwl_dbgfs_is_match("request_id=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req.request_id = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("initiator=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req.initiator = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("one_sided_los_disable=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req.one_sided_los_disable = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("req_timeout=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req.req_timeout = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("report_policy=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req.report_policy = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("macaddr_random=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req.macaddr_random = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("num_of_ap=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req.num_of_ap = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("macaddr_template=", buf);
+       if (data) {
+               u8 mac[ETH_ALEN];
+
+               if (!mac_pton(data, mac)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN);
+       }
+
+       data = iwl_dbgfs_is_match("macaddr_mask=", buf);
+       if (data) {
+               u8 mac[ETH_ALEN];
+
+               if (!mac_pton(data, mac)) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN);
+       }
+
+       data = iwl_dbgfs_is_match("ap=", buf);
+       if (data) {
+               struct iwl_tof_range_req_ap_entry ap;
+               int size = sizeof(struct iwl_tof_range_req_ap_entry);
+               u16 burst_period;
+               u8 *mac = ap.bssid;
+               int i;
+
+               if (sscanf(data, "%d %hhd %hhx %hhx"
+                          "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx"
+                          "%hhx %hhx %hx"
+                          "%hhx %hhx %x"
+                          "%hhx %hhx %hhx %hhx",
+                          &i, &ap.channel_num, &ap.bandwidth,
+                          &ap.ctrl_ch_position,
+                          mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5,
+                          &ap.measure_type, &ap.num_of_bursts,
+                          &burst_period,
+                          &ap.samples_per_burst, &ap.retries_per_sample,
+                          &ap.tsf_delta, &ap.location_req, &ap.asap_mode,
+                          &ap.enable_dyn_ack, &ap.rssi) != 20) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               /* reject indexes at or beyond the ap[] array size */
+               if (i >= IWL_MVM_TOF_MAX_APS) {
+                       IWL_ERR(mvm, "Invalid AP index %d\n", i);
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               ap.burst_period = cpu_to_le16(burst_period);
+
+               memcpy(&mvm->tof_data.range_req.ap[i], &ap, size);
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("send_range_request=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0 && value) {
+                       ret = iwl_mvm_tof_range_request_cmd(mvm, vif);
+                       goto out;
+               }
+       }
+
+out:
+       mutex_unlock(&mvm->mutex);
+       return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file,
+                                               char __user *user_buf,
+                                               size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       char buf[512];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+       struct iwl_tof_range_req_cmd *cmd;
+       int i;
+
+       cmd = &mvm->tof_data.range_req;
+
+       mutex_lock(&mvm->mutex);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "request_id= %d\n",
+                        cmd->request_id);
+       pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n",
+                        cmd->initiator);
+       pos += scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n",
+                        cmd->one_sided_los_disable);
+       pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n",
+                        cmd->req_timeout);
+       pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n",
+                        cmd->report_policy);
+       pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n",
+                        cmd->macaddr_random);
+       pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n",
+                        cmd->macaddr_template);
+       pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n",
+                        cmd->macaddr_mask);
+       pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n",
+                        cmd->num_of_ap);
+       for (i = 0; i < cmd->num_of_ap; i++) {
+               struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i];
+
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "ap %.2d: channel_num=%hhx bw=%hhx"
+                               " control=%hhx bssid=%pM type=%hhx"
+                               " num_of_bursts=%hhx burst_period=%hx ftm=%hhx"
+                               " retries=%hhx tsf_delta=%x location_req=%hhx "
+                               " asap=%hhx enable=%hhx rssi=%hhx\n",
+                               i, ap->channel_num, ap->bandwidth,
+                               ap->ctrl_ch_position, ap->bssid,
+                               ap->measure_type, ap->num_of_bursts,
+                               ap->burst_period, ap->samples_per_burst,
+                               ap->retries_per_sample, ap->tsf_delta,
+                               ap->location_req, ap->asap_mode,
+                               ap->enable_dyn_ack, ap->rssi);
+       }
+
+       mutex_unlock(&mvm->mutex);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif,
+                                                char *buf,
+                                                size_t count, loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       u32 value;
+       int ret = 0;
+       char *data;
+
+       mutex_lock(&mvm->mutex);
+
+       data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req_ext.tsf_timer_offset_msec =
+                                                       cpu_to_le16(value);
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req_ext.min_delta_ftm = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req_ext.ftm_format_and_bw20M =
+                                                                       value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req_ext.ftm_format_and_bw40M =
+                                                                       value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.range_req_ext.ftm_format_and_bw80M =
+                                                                       value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("send_range_req_ext=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0 && value) {
+                       ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif);
+                       goto out;
+               }
+       }
+
+out:
+       mutex_unlock(&mvm->mutex);
+       return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file,
+                                               char __user *user_buf,
+                                               size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       char buf[256];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+       struct iwl_tof_range_req_ext_cmd *cmd;
+
+       cmd = &mvm->tof_data.range_req_ext;
+
+       mutex_lock(&mvm->mutex);
+
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "tsf_timer_offset_msec = %hx\n",
+                        cmd->tsf_timer_offset_msec);
+       pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhx\n",
+                        cmd->min_delta_ftm);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "ftm_format_and_bw20M = %hhx\n",
+                        cmd->ftm_format_and_bw20M);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "ftm_format_and_bw40M = %hhx\n",
+                        cmd->ftm_format_and_bw40M);
+       pos += scnprintf(buf + pos, bufsz - pos,
+                        "ftm_format_and_bw80M = %hhx\n",
+                        cmd->ftm_format_and_bw80M);
+
+       mutex_unlock(&mvm->mutex);
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif,
+                                              char *buf,
+                                              size_t count, loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       u32 value;
+       int ret = 0;
+       int abort_id;
+       char *data;
+
+       mutex_lock(&mvm->mutex);
+
+       data = iwl_dbgfs_is_match("abort_id=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0)
+                       mvm->tof_data.last_abort_id = value;
+               goto out;
+       }
+
+       data = iwl_dbgfs_is_match("send_range_abort=", buf);
+       if (data) {
+               ret = kstrtou32(data, 10, &value);
+               if (ret == 0 && value) {
+                       abort_id = mvm->tof_data.last_abort_id;
+                       ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id);
+                       goto out;
+               }
+       }
+
+out:
+       mutex_unlock(&mvm->mutex);
+       return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_abort_read(struct file *file,
+                                             char __user *user_buf,
+                                             size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       char buf[32];
+       int pos = 0;
+       const size_t bufsz = sizeof(buf);
+       int last_abort_id;
+
+       mutex_lock(&mvm->mutex);
+       last_abort_id = mvm->tof_data.last_abort_id;
+       mutex_unlock(&mvm->mutex);
+
+       pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n",
+                        last_abort_id);
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file,
+                                                char __user *user_buf,
+                                                size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       char *buf;
+       int pos = 0;
+       const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256;
+       struct iwl_tof_range_rsp_ntfy *cmd;
+       int i, ret;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       mutex_lock(&mvm->mutex);
+       cmd = &mvm->tof_data.range_resp;
+
+       pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n",
+                        cmd->request_id);
+       pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n",
+                        cmd->request_status);
+       pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n",
+                        cmd->last_in_batch);
+       pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n",
+                        cmd->num_of_aps);
+       for (i = 0; i < cmd->num_of_aps; i++) {
+               struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i];
+
+               pos += scnprintf(buf + pos, bufsz - pos,
+                               "ap %.2d: bssid=%pM status=%hhx bw=%hhx"
+                               " rtt=%x rtt_var=%x rtt_spread=%x"
+                               " rssi=%hhx  rssi_spread=%hhx"
+                               " range=%x range_var=%x"
+                               " time_stamp=%x\n",
+                               i, ap->bssid, ap->measure_status,
+                               ap->measure_bw,
+                               ap->rtt, ap->rtt_variance, ap->rtt_spread,
+                               ap->rssi, ap->rssi_spread, ap->range,
+                               ap->range_variance, ap->timestamp);
+       }
+       mutex_unlock(&mvm->mutex);
+
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+       return ret;
+}
+
 static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
                                           size_t count, loff_t *ppos)
 {
@@ -628,6 +1354,12 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_enable, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_request, 512);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
+MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
 
 void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
@@ -671,6 +1403,25 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir,
                                         S_IRUSR | S_IWUSR);
 
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) &&
+           !vif->p2p && (vif->type != NL80211_IFTYPE_P2P_DEVICE)) {
+               if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP)
+                       MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params,
+                                                mvmvif->dbgfs_dir,
+                                                S_IRUSR | S_IWUSR);
+
+               MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir,
+                                        S_IRUSR | S_IWUSR);
+               MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir,
+                                        S_IRUSR | S_IWUSR);
+               MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir,
+                                        S_IRUSR | S_IWUSR);
+               MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir,
+                                        S_IRUSR | S_IWUSR);
+               MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir,
+                                        S_IRUSR);
+       }
+
        /*
         * Create symlink for convenience pointing to interface specific
         * debugfs entries for the driver. For example, under
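Taken together, the files registered above form a small text protocol:
parameters are staged with "key=value" writes, a "send_*=1" write fires the
firmware command, and the read handlers dump the staged command or the last
notification. A hypothetical userspace sketch follows; the debugfs directory
used below is a placeholder, not a path defined by this patch, and per-phy and
per-interface names vary by system:

	/* Hypothetical usage sketch; needs root and a mounted debugfs. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* placeholder path -- not defined by this patch */
		const char *dir =
			"/sys/kernel/debug/ieee80211/phy0/netdev:wlan0/iwlmvm";
		char path[256], buf[512];
		ssize_t n;
		int fd;

		/* fire a range request using previously staged parameters */
		snprintf(path, sizeof(path), "%s/tof_range_request", dir);
		fd = open(path, O_WRONLY);
		if (fd >= 0) {
			const char *cmd = "send_range_request=1\n";

			if (write(fd, cmd, strlen(cmd)) < 0)
				perror(path);
			close(fd);
		}

		/* later, read back the last range response notification */
		snprintf(path, sizeof(path), "%s/tof_range_response", dir);
		fd = open(path, O_RDONLY);
		if (fd >= 0) {
			n = read(fd, buf, sizeof(buf) - 1);
			if (n > 0) {
				buf[n] = '\0';
				fputs(buf, stdout);
			}
			close(fd);
		}
		return 0;
	}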
index b1baa33cc19b3228a8534af71ab69c7dede40520..b86b1697d56f6807011faa40188094f47140a99a 100644 (file)
@@ -413,7 +413,7 @@ struct iwl_beacon_filter_cmd {
 #define IWL_BF_TEMP_FAST_FILTER_MIN 0
 
 #define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
-#define IWL_BF_TEMP_SLOW_FILTER_D0I3 5
+#define IWL_BF_TEMP_SLOW_FILTER_D0I3 20
 #define IWL_BF_TEMP_SLOW_FILTER_MAX 255
 #define IWL_BF_TEMP_SLOW_FILTER_MIN 0
 
index 5e4cbdb44c607ec8399bae489f28990dc907eaeb..660cc1c93e192654345b96b5b4a6a9c9589dc746 100644 (file)
@@ -87,41 +87,6 @@ struct iwl_ssid_ie {
        u8 ssid[IEEE80211_MAX_SSID_LEN];
 } __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
 
-/* How many statistics are gathered for each channel */
-#define SCAN_RESULTS_STATISTICS 1
-
-/**
- * enum iwl_scan_complete_status - status codes for scan complete notifications
- * @SCAN_COMP_STATUS_OK:  scan completed successfully
- * @SCAN_COMP_STATUS_ABORT: scan was aborted by user
- * @SCAN_COMP_STATUS_ERR_SLEEP: sending null sleep packet failed
- * @SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT: timeout before channel is ready
- * @SCAN_COMP_STATUS_ERR_PROBE: sending probe request failed
- * @SCAN_COMP_STATUS_ERR_WAKEUP: sending null wakeup packet failed
- * @SCAN_COMP_STATUS_ERR_ANTENNAS: invalid antennas chosen at scan command
- * @SCAN_COMP_STATUS_ERR_INTERNAL: internal error caused scan abort
- * @SCAN_COMP_STATUS_ERR_COEX: medium was lost ot WiMax
- * @SCAN_COMP_STATUS_P2P_ACTION_OK: P2P public action frame TX was successful
- *     (not an error!)
- * @SCAN_COMP_STATUS_ITERATION_END: indicates end of one repetition the driver
- *     asked for
- * @SCAN_COMP_STATUS_ERR_ALLOC_TE: scan could not allocate time events
-*/
-enum iwl_scan_complete_status {
-       SCAN_COMP_STATUS_OK = 0x1,
-       SCAN_COMP_STATUS_ABORT = 0x2,
-       SCAN_COMP_STATUS_ERR_SLEEP = 0x3,
-       SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT = 0x4,
-       SCAN_COMP_STATUS_ERR_PROBE = 0x5,
-       SCAN_COMP_STATUS_ERR_WAKEUP = 0x6,
-       SCAN_COMP_STATUS_ERR_ANTENNAS = 0x7,
-       SCAN_COMP_STATUS_ERR_INTERNAL = 0x8,
-       SCAN_COMP_STATUS_ERR_COEX = 0x9,
-       SCAN_COMP_STATUS_P2P_ACTION_OK = 0xA,
-       SCAN_COMP_STATUS_ITERATION_END = 0x0B,
-       SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C,
-};
-
 /* scan offload */
 #define IWL_SCAN_MAX_BLACKLIST_LEN     64
 #define IWL_SCAN_SHORT_BLACKLIST_LEN   16
@@ -143,71 +108,6 @@ enum scan_framework_client {
        SCAN_CLIENT_ASSET_TRACKING      = BIT(2),
 };
 
-/**
- * struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6
- * @scan_flags:                see enum iwl_scan_flags
- * @channel_count:     channels in channel list
- * @quiet_time:                dwell time, in milliseconds, on quiet channel
- * @quiet_plcp_th:     quiet channel num of packets threshold
- * @good_CRC_th:       passive to active promotion threshold
- * @rx_chain:          RXON rx chain.
- * @max_out_time:      max TUs to be out of associated channel
- * @suspend_time:      pause scan this TUs when returning to service channel
- * @flags:             RXON flags
- * @filter_flags:      RXONfilter
- * @tx_cmd:            tx command for active scan; for 2GHz and for 5GHz.
- * @direct_scan:       list of SSIDs for directed active scan
- * @scan_type:         see enum iwl_scan_type.
- * @rep_count:         repetition count for each scheduled scan iteration.
- */
-struct iwl_scan_offload_cmd {
-       __le16 len;
-       u8 scan_flags;
-       u8 channel_count;
-       __le16 quiet_time;
-       __le16 quiet_plcp_th;
-       __le16 good_CRC_th;
-       __le16 rx_chain;
-       __le32 max_out_time;
-       __le32 suspend_time;
-       /* RX_ON_FLAGS_API_S_VER_1 */
-       __le32 flags;
-       __le32 filter_flags;
-       struct iwl_tx_cmd tx_cmd[2];
-       /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
-       struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
-       __le32 scan_type;
-       __le32 rep_count;
-} __packed;
-
-enum iwl_scan_offload_channel_flags {
-       IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE         = BIT(0),
-       IWL_SCAN_OFFLOAD_CHANNEL_NARROW         = BIT(22),
-       IWL_SCAN_OFFLOAD_CHANNEL_FULL           = BIT(24),
-       IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL        = BIT(25),
-};
-
-/* channel configuration for struct iwl_scan_offload_cfg. Each channels needs:
- * __le32 type:        bitmap; bits 1-20 are for directed scan to i'th ssid and
- *     see enum iwl_scan_offload_channel_flags.
- * __le16 channel_number: channel number 1-13 etc.
- * __le16 iter_count: repetition count for the channel.
- * __le32 iter_interval: interval between two iterations on one channel.
- * u8 active_dwell.
- * u8 passive_dwell.
- */
-#define IWL_SCAN_CHAN_SIZE 14
-
-/**
- * iwl_scan_offload_cfg - SCAN_OFFLOAD_CONFIG_API_S
- * @scan_cmd:          scan command fixed part
- * @data:              scan channel configuration and probe request frames
- */
-struct iwl_scan_offload_cfg {
-       struct iwl_scan_offload_cmd scan_cmd;
-       u8 data[0];
-} __packed;
-
 /**
  * iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
  * @ssid:              MAC address to filter out
@@ -297,35 +197,6 @@ enum iwl_scan_ebs_status {
        IWL_SCAN_EBS_INACTIVE,
 };
 
-/**
- * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
- * @last_schedule_line:                last schedule line executed (fast or regular)
- * @last_schedule_iteration:   last scan iteration executed before scan abort
- * @status:                    enum iwl_scan_offload_compleate_status
- * @ebs_status: last EBS status, see IWL_SCAN_EBS_*
- */
-struct iwl_scan_offload_complete {
-       u8 last_schedule_line;
-       u8 last_schedule_iteration;
-       u8 status;
-       u8 ebs_status;
-} __packed;
-
-/**
- * iwl_sched_scan_results - SCAN_OFFLOAD_MATCH_FOUND_NTF_API_S_VER_1
- * @ssid_bitmap:       SSIDs indexes found in this iteration
- * @client_bitmap:     clients that are active and wait for this notification
- */
-struct iwl_sched_scan_results {
-       __le16 ssid_bitmap;
-       u8 client_bitmap;
-       u8 reserved;
-};
-
-/* Unified LMAC scan API */
-
-#define IWL_MVM_BASIC_PASSIVE_DWELL 110
-
 /**
  * iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S
  * @tx_flags: combination of TX_CMD_FLG_*
@@ -550,18 +421,6 @@ struct iwl_periodic_scan_complete {
 
 /* UMAC Scan API */
 
-/**
- * struct iwl_mvm_umac_cmd_hdr - Command header for UMAC commands
- * @size:      size of the command (not including header)
- * @reserved0: for future use and alignment
- * @ver:       API version number
- */
-struct iwl_mvm_umac_cmd_hdr {
-       __le16 size;
-       u8 reserved0;
-       u8 ver;
-} __packed;
-
 /* The maximum of either of these cannot exceed 8, because we use an
  * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h).
  */
@@ -621,7 +480,6 @@ enum iwl_channel_flags {
 
 /**
  * struct iwl_scan_config
- * @hdr: umac command header
  * @flags:                     enum scan_config_flags
  * @tx_chains:                 valid_tx antenna - ANT_* definitions
  * @rx_chains:                 valid_rx antenna - ANT_* definitions
@@ -639,7 +497,6 @@ enum iwl_channel_flags {
  * @channel_array:             default supported channels
  */
 struct iwl_scan_config {
-       struct iwl_mvm_umac_cmd_hdr hdr;
        __le32 flags;
        __le32 tx_chains;
        __le32 rx_chains;
@@ -660,7 +517,8 @@ struct iwl_scan_config {
  * iwl_umac_scan_flags
  *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
  *     can be preempted by other scan requests with higher priority.
- *     The low priority scan is aborted.
+ *     The low priority scan will be resumed when the higher priority scan is
+ *     completed.
  *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
  *     when scan starts.
  */
@@ -734,7 +592,6 @@ struct iwl_scan_req_umac_tail {
 
 /**
  * struct iwl_scan_req_umac
- * @hdr: umac command header
  * @flags: &enum iwl_umac_scan_flags
  * @uid: scan id, &enum iwl_umac_scan_uid_offsets
  * @ooc_priority: out of channel priority - &enum iwl_scan_priority
@@ -753,7 +610,6 @@ struct iwl_scan_req_umac_tail {
  *     &struct iwl_scan_req_umac_tail
  */
 struct iwl_scan_req_umac {
-       struct iwl_mvm_umac_cmd_hdr hdr;
        __le32 flags;
        __le32 uid;
        __le32 ooc_priority;
@@ -775,12 +631,10 @@ struct iwl_scan_req_umac {
 
 /**
  * struct iwl_umac_scan_abort
- * @hdr: umac command header
  * @uid: scan id, &enum iwl_umac_scan_uid_offsets
  * @flags: reserved
  */
 struct iwl_umac_scan_abort {
-       struct iwl_mvm_umac_cmd_hdr hdr;
        __le32 uid;
        __le32 flags;
 } __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h
new file mode 100644 (file)
index 0000000..eed6271
--- /dev/null
@@ -0,0 +1,386 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __fw_api_tof_h__
+#define __fw_api_tof_h__
+
+#include "fw-api.h"
+
+/* ToF sub-group command IDs */
+enum iwl_mvm_tof_sub_grp_ids {
+       TOF_RANGE_REQ_CMD = 0x1,
+       TOF_CONFIG_CMD = 0x2,
+       TOF_RANGE_ABORT_CMD = 0x3,
+       TOF_RANGE_REQ_EXT_CMD = 0x4,
+       TOF_RESPONDER_CONFIG_CMD = 0x5,
+       TOF_NW_INITIATED_RES_SEND_CMD = 0x6,
+       TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7,
+       TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC,
+       TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD,
+       TOF_RANGE_RESPONSE_NOTIF = 0xFE,
+       TOF_MCSI_DEBUG_NOTIF = 0xFB,
+};
+
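The sub-group ID does not travel in the host command header; each ToF command
struct below carries it as its first word, while the wrapping host command is
TOF_CMD (0x10, added to the command enum later in this patch). A hedged sketch
of how a config command would plausibly be dispatched (illustrative only, not
code from this patch):

	/* Sketch: dispatch a ToF sub-group command. The sub-group ID rides
	 * in the payload; TOF_CMD is the host command ID.
	 */
	static int iwl_mvm_tof_config_sketch(struct iwl_mvm *mvm)
	{
		struct iwl_tof_config_cmd cmd = {
			.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD),
			.tof_disabled = 0,
			.one_sided_disabled = 0,
		};

		lockdep_assert_held(&mvm->mutex);
		return iwl_mvm_send_cmd_pdu(mvm, TOF_CMD, 0,
					    sizeof(cmd), &cmd);
	}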
+/**
+ * struct iwl_tof_config_cmd - ToF configuration
+ * @tof_disabled: 0 - enabled, 1 - disabled
+ * @one_sided_disabled: 0 - enabled, 1 - disabled
+ * @is_debug_mode: 1 - debug mode, 0 - otherwise
+ * @is_buf_required: 1 - channel estimation buffer required, 0 - otherwise
+ */
+struct iwl_tof_config_cmd {
+       __le32 sub_grp_cmd_id;
+       u8 tof_disabled;
+       u8 one_sided_disabled;
+       u8 is_debug_mode;
+       u8 is_buf_required;
+} __packed;
+
+/**
+ * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
+ * @burst_period: future use: (currently hard coded in the LMAC)
+ *               The interval between two sequential bursts.
+ * @min_delta_ftm: future use: (currently hard coded in the LMAC)
+ *                The minimum delay between two sequential FTM Responses
+ *                in the same burst.
+ * @burst_duration: future use: (currently hard coded in the LMAC)
+ *                The total time for all FTM handshakes in the same burst.
+ *                Affects the time event duration in the LMAC.
+ * @num_of_burst_exp: future use: (currently hard coded in the LMAC)
+ *                The number of bursts for the current ToF request. Affects
+ *                the number of event allocations in the current iteration.
+ * @get_ch_est: for xVT only, NA for driver
+ * @abort_responder: when set to '1' - Responder will terminate its activity
+ *                  (all other fields in the command are ignored)
+ * @recv_sta_req_params: 1 - Responder will ignore the other Responder's
+ *                      params and use the recommended Initiator params.
+ *                      0 - otherwise
+ * @channel_num: current AP Channel
+ * @bandwidth: current AP Bandwidth: 0 - 20MHz, 1 - 40MHz, 2 - 80MHz
+ * @rate: current AP rate
+ * @ctrl_ch_position: coding of the control channel position relative to
+ *          the center frequency:
+ *          40MHz: 0 - below center, 1 - above center
+ *          80MHz: bits [0..1]: 0 - the nearer 20MHz to the center,
+ *                              1 - the farther 20MHz from the center;
+ *                 bit[2] - as above, for 40MHz
+ * @ftm_per_burst: FTMs per Burst
+ * @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response,
+ *               '1' - we measure over the Initial FTM Response
+ * @asap_mode: ASAP / Non ASAP mode for the current WLS station
+ * @sta_id: index of the AP STA when in AP mode
+ * @tsf_timer_offset_msecs: The dictated time offset (mSec) from the AP's TSF
+ * @toa_offset: Artificial addition [0.1nsec] for the ToA - to be used for debug
+ *             purposes, simulating station movement by adding various values
+ *             to this field
+ * @bssid: Current AP BSSID
+ */
+struct iwl_tof_responder_config_cmd {
+       __le32 sub_grp_cmd_id;
+       __le16 burst_period;
+       u8 min_delta_ftm;
+       u8 burst_duration;
+       u8 num_of_burst_exp;
+       u8 get_ch_est;
+       u8 abort_responder;
+       u8 recv_sta_req_params;
+       u8 channel_num;
+       u8 bandwidth;
+       u8 rate;
+       u8 ctrl_ch_position;
+       u8 ftm_per_burst;
+       u8 ftm_resp_ts_avail;
+       u8 asap_mode;
+       u8 sta_id;
+       __le16 tsf_timer_offset_msecs;
+       __le16 toa_offset;
+       u8 bssid[ETH_ALEN];
+} __packed;
+
+/**
+ * struct iwl_tof_range_request_ext_cmd - extended range req for WLS
+ * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF
+ * @min_delta_ftm: Minimal time between two consecutive measurements,
+ *                in units of 100us. 0 means no preference by station
+ * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended
+ *                     value to be sent to the AP
+ * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended
+ *                     value to be sent to the AP
+ * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended
+ *                     value to be sent to the AP
+ */
+struct iwl_tof_range_req_ext_cmd {
+       __le32 sub_grp_cmd_id;
+       __le16 tsf_timer_offset_msec;
+       __le16 reserved;
+       u8 min_delta_ftm;
+       u8 ftm_format_and_bw20M;
+       u8 ftm_format_and_bw40M;
+       u8 ftm_format_and_bw80M;
+} __packed;
+
+#define IWL_MVM_TOF_MAX_APS 21
+
+/**
+ * struct iwl_tof_range_req_ap_entry - AP configuration parameters
+ * @channel_num: Current AP Channel
+ * @bandwidth: Current AP Bandwidth: 0 - 20MHz, 1 - 40MHz, 2 - 80MHz
+ * @tsf_delta_direction: TSF relatively to the subject AP
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ *          center frequency:
+ *          40MHz: 0 - below center, 1 - above center
+ *          80MHz: bits [0..1]: 0 - the nearer 20MHz to the center,
+ *                              1 - the farther 20MHz from the center;
+ *                 bit[2] - as above, for 40MHz
+ * @bssid: AP's bss id
+ * @measure_type: Measurement type: 0 - two sided, 1 - One sided
+ * @num_of_bursts: Recommended value to be sent to the AP. 2's exponent of
+ *                the number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ *               periodicity in units of 100ms; ignored if num_of_bursts = 0
+ * @samples_per_burst: 2-sided: the number of FTM pairs in a single burst (1-31)
+ *                    1-sided: how many rts/cts pairs should be used per burst.
+ * @retries_per_sample: Max number of retries that the LMAC should send
+ *                     in case of no replies by the AP.
+ * @tsf_delta: TSF Delta in units of microseconds.
+ *            The difference between the AP TSF and the device local clock.
+ * @location_req: Location Request: Bit[0] - LCI should be sent in the FTMR,
+ *                           Bit[1] - Civic should be sent in the FTMR
+ * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided)
+ * @enable_dyn_ack: Enable Dynamic ACK BW.
+ *         0 - Initiator interacts with a regular AP
+ *         1 - Initiator interacts with a Responder machine: need to send the
+ *         Initiator Acks with HT 40MHz / 80MHz, since the Responder should
+ *         use it for its ch est measurement (this flag will be set when we
+ *         configure the opposite machine to be Responder).
+ * @rssi: Last received value
+ *       legal values: -128-0 (0x7f); values above 0x0 indicate an invalid
+ *       value.
+ */
+struct iwl_tof_range_req_ap_entry {
+       u8 channel_num;
+       u8 bandwidth;
+       u8 tsf_delta_direction;
+       u8 ctrl_ch_position;
+       u8 bssid[ETH_ALEN];
+       u8 measure_type;
+       u8 num_of_bursts;
+       __le16 burst_period;
+       u8 samples_per_burst;
+       u8 retries_per_sample;
+       __le32 tsf_delta;
+       u8 location_req;
+       u8 asap_mode;
+       u8 enable_dyn_ack;
+       s8 rssi;
+} __packed;
+
+/**
+ * enum iwl_tof_response_mode
+ * @IWL_MVM_TOF_RESPOSE_ASAP: report each AP measurement separately as soon as
+ *                           possible (not supported for this release)
+ * @IWL_MVM_TOF_RESPOSE_TIMEOUT: report all AP measurements as a batch upon
+ *                              timeout expiration
+ * @IWL_MVM_TOF_RESPOSE_COMPLETE: report all AP measurements as a batch at the
+ *                               earlier of: measurements completion / timeout
+ *                               expiration.
+ */
+enum iwl_tof_response_mode {
+       IWL_MVM_TOF_RESPOSE_ASAP = 1,
+       IWL_MVM_TOF_RESPOSE_TIMEOUT,
+       IWL_MVM_TOF_RESPOSE_COMPLETE,
+};
+
+/**
+ * struct iwl_tof_range_req_cmd - start measurement cmd
+ * @request_id: A Token incremented per request. The same Token will be
+ *             sent back in the range response
+ * @initiator: 0- NW initiated,  1 - Client Initiated
+ * @one_sided_los_disable: '0' - run ML-Algo for both ToF/OneSided,
+ *                        '1' - run ML-Algo for ToF only
+ * @req_timeout: Requested timeout of the response in units of 100ms.
+ *          This is equivalent to the session time configured to the
+ *          LMAC in Initiator Request
+ * @report_policy: Supported partially for this release: for the current
+ *                release the range report will be uploaded as a batch when
+ *                ready or when the session is done (successfully / partially);
+ *                one of enum iwl_tof_response_mode.
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @macaddr_random: '0' Use default source MAC address (i.e. p2_p),
+ *                 '1' Use MAC Address randomization according to the below
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ *               Bits set to 1 shall be randomized by the UMAC
+ */
+struct iwl_tof_range_req_cmd {
+       __le32 sub_grp_cmd_id;
+       u8 request_id;
+       u8 initiator;
+       u8 one_sided_los_disable;
+       u8 req_timeout;
+       u8 report_policy;
+       u8 los_det_disable;
+       u8 num_of_ap;
+       u8 macaddr_random;
+       u8 macaddr_template[ETH_ALEN];
+       u8 macaddr_mask[ETH_ALEN];
+       struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
+} __packed;
+
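For illustration, a sketch of filling a single one-sided measurement target in
this command; the helper name and all values below are made up, and the
debugfs write handler earlier in this patch builds the same entry from parsed
user input:

	/* Illustrative only: populate one AP entry of a range request. */
	static void tof_fill_ap_entry_sketch(
			struct iwl_tof_range_req_ap_entry *ap, const u8 *bssid)
	{
		memset(ap, 0, sizeof(*ap));
		memcpy(ap->bssid, bssid, ETH_ALEN);
		ap->channel_num = 36;			/* 5 GHz channel */
		ap->bandwidth = 1;			/* 40MHz */
		ap->measure_type = 1;			/* one sided */
		ap->num_of_bursts = 0;			/* 2^0 = one burst */
		ap->burst_period = cpu_to_le16(10);	/* 10 * 100ms = 1s */
		ap->samples_per_burst = 2;
		ap->retries_per_sample = 3;
		ap->rssi = 0x7f;			/* no valid prior RSSI */
	}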
+/**
+ * struct iwl_tof_gen_resp_cmd - generic ToF response
+ */
+struct iwl_tof_gen_resp_cmd {
+       __le32 sub_grp_cmd_id;
+       u8 data[];
+} __packed;
+
+/**
+ * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
+ * @measure_status: current APs measurement status
+ * @measure_bw: Current AP Bandwidth: 0 - 20MHz, 1 - 40MHz, 2 - 80MHz
+ * @rtt: The Round Trip Time that took for the last measurement for
+ *      current AP [nSec]
+ * @rtt_variance: The Variance of the RTT values measured for current AP
+ * @rtt_spread: The Difference between the maximum and the minimum RTT
+ *            values measured for current AP in the current session [nsec]
+ * @rssi: RSSI as uploaded in the Channel Estimation notification
+ * @rssi_spread: The Difference between the maximum and the minimum RSSI values
+ *             measured for current AP in the current session
+ * @range: Measured range [cm]
+ * @range_variance: Measured range variance [cm]
+ * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
+ *            uploaded by the LMAC
+ */
+struct iwl_tof_range_rsp_ap_entry_ntfy {
+       u8 bssid[ETH_ALEN];
+       u8 measure_status;
+       u8 measure_bw;
+       __le32 rtt;
+       __le32 rtt_variance;
+       __le32 rtt_spread;
+       s8 rssi;
+       u8 rssi_spread;
+       __le16 reserved;
+       __le32 range;
+       __le32 range_variance;
+       __le32 timestamp;
+} __packed;
+
+/**
+ * struct iwl_tof_range_rsp_ntfy - ToF range response notification
+ * @request_id: A Token ID of the corresponding Range request
+ * @request_status: status of current measurement session
+ * @last_in_batch: report policy (when not all responses are uploaded at once)
+ * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ */
+struct iwl_tof_range_rsp_ntfy {
+       u8 request_id;
+       u8 request_status;
+       u8 last_in_batch;
+       u8 num_of_aps;
+       struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
+} __packed;
+
+#define IWL_MVM_TOF_MCSI_BUF_SIZE  (245)
+/**
+ * struct iwl_tof_mcsi_notif - used for debug
+ * @token: token ID for the current session
+ * @role: '0' - initiator, '1' - responder
+ * @initiator_bssid: initiator machine
+ * @responder_bssid: responder machine
+ * @mcsi_buffer: debug data
+ */
+struct iwl_tof_mcsi_notif {
+       u8 token;
+       u8 role;
+       __le16 reserved;
+       u8 initiator_bssid[ETH_ALEN];
+       u8 responder_bssid[ETH_ALEN];
+       u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4];
+} __packed;
+
+/**
+ * struct iwl_tof_neighbor_report
+ * @bssid: BSSID of the AP which sent the report
+ * @request_token: same token as the corresponding request
+ * @status:
+ * @report_ie_len: the length of the response frame starting from the Element ID
+ * @data: the IEs
+ */
+struct iwl_tof_neighbor_report {
+       u8 bssid[ETH_ALEN];
+       u8 request_token;
+       u8 status;
+       __le16 report_ie_len;
+       u8 data[];
+} __packed;
+
+/**
+ * struct iwl_tof_range_abort_cmd
+ * @request_id: corresponds to a range request
+ */
+struct iwl_tof_range_abort_cmd {
+       __le32 sub_grp_cmd_id;
+       u8 request_id;
+       u8 reserved[3];
+} __packed;
+
+#endif
index 16e9ef49397f4d055b788f970f1b74bf2712e5e7..4e29c11cc96962e494061f90bbfc0da96fd48c21 100644 (file)
@@ -75,6 +75,7 @@
 #include "fw-api-coex.h"
 #include "fw-api-scan.h"
 #include "fw-api-stats.h"
+#include "fw-api-tof.h"
 
 /* Tx queue numbers */
 enum {
@@ -163,6 +164,10 @@ enum {
        CALIB_RES_NOTIF_PHY_DB = 0x6b,
        /* PHY_DB_CMD = 0x6c, */
 
+       /* ToF - 802.11mc FTM */
+       TOF_CMD = 0x10,
+       TOF_NOTIFICATION = 0x11,
+
        /* Power - legacy power table command */
        POWER_TABLE_CMD = 0x77,
        PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
@@ -1080,10 +1085,33 @@ struct iwl_rx_phy_info {
        __le16 frame_time;
 } __packed;
 
+/*
+ * TCP offload Rx assist info
+ *
+ * bits 0:3 - reserved
+ * bits 4:7 - MIC CRC length
+ * bits 8:12 - MAC header length
+ * bit 13 - Padding indication
+ * bit 14 - A-AMSDU indication
+ * bit 15 - Offload enabled
+ */
+enum iwl_csum_rx_assist_info {
+       CSUM_RXA_RESERVED_MASK  = 0x000f,
+       CSUM_RXA_MICSIZE_MASK   = 0x00f0,
+       CSUM_RXA_HEADERLEN_MASK = 0x1f00,
+       CSUM_RXA_PADD           = BIT(13),
+       CSUM_RXA_AMSDU          = BIT(14),
+       CSUM_RXA_ENA            = BIT(15)
+};
+
+/**
+ * struct iwl_rx_mpdu_res_start - phy info
+ * @assist: see CSUM_RXA_* above
+ */
 struct iwl_rx_mpdu_res_start {
        __le16 byte_count;
-       __le16 reserved;
-} __packed;
+       __le16 assist;
+} __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */
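A sketch of consuming these fields, under the bit layout documented above and
the CSUM status bits added further down in this file; the function and its
name are illustrative, not part of this patch:

	/* Sketch: decide whether hardware checksum results can be trusted. */
	static bool iwl_mvm_rx_csum_ok_sketch(
			struct iwl_rx_mpdu_res_start *res, u32 rx_pkt_status)
	{
		u16 assist = le16_to_cpu(res->assist);
		u8 hdrlen = (assist & CSUM_RXA_HEADERLEN_MASK) >> 8;

		if (!(assist & CSUM_RXA_ENA))
			return false;	/* offload not enabled for this MPDU */

		(void)hdrlen;	/* header length would feed padding math */
		return (rx_pkt_status & RX_MPDU_RES_STATUS_CSUM_DONE) &&
		       (rx_pkt_status & RX_MPDU_RES_STATUS_CSUM_OK);
	}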
 
 /**
  * enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags
@@ -1136,6 +1164,8 @@ enum iwl_rx_phy_flags {
  * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP:
  * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT:
  * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
+ * @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw
+ * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors
  * @RX_MPDU_RES_STATUS_HASH_INDEX_MSK:
  * @RX_MPDU_RES_STATUS_STA_ID_MSK:
  * @RX_MPDU_RES_STATUS_RRF_KILL:
@@ -1165,6 +1195,8 @@ enum iwl_mvm_rx_status {
        RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP               = BIT(13),
        RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT               = BIT(14),
        RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME             = BIT(15),
+       RX_MPDU_RES_STATUS_CSUM_DONE                    = BIT(16),
+       RX_MPDU_RES_STATUS_CSUM_OK                      = BIT(17),
        RX_MPDU_RES_STATUS_HASH_INDEX_MSK               = (0x3F0000),
        RX_MPDU_RES_STATUS_STA_ID_MSK                   = (0x1f000000),
        RX_MPDU_RES_STATUS_RRF_KILL                     = BIT(29),
index eb10c5ee4a1407c5b02babe009a899e9f747d6c9..106edc78c8bc076177ef45091871b3c65836c5e5 100644 (file)
@@ -213,7 +213,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
        const struct fw_img *fw;
        int ret, i;
        enum iwl_ucode_type old_type = mvm->cur_ucode;
-       static const u8 alive_cmd[] = { MVM_ALIVE };
+       static const u16 alive_cmd[] = { MVM_ALIVE };
        struct iwl_sf_region st_fwrd_space;
 
        if (ucode_type == IWL_UCODE_REGULAR &&
@@ -314,7 +314,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
 int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 {
        struct iwl_notification_wait calib_wait;
-       static const u8 init_complete[] = {
+       static const u16 init_complete[] = {
                INIT_COMPLETE_NOTIF,
                CALIB_RES_NOTIF_PHY_DB
        };
@@ -444,12 +444,6 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
                return;
 
        pkt = cmd.resp_pkt;
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(mvm, "Bad return from SHARED_MEM_CFG (0x%08X)\n",
-                       pkt->hdr.flags);
-               goto exit;
-       }
-
        mem_cfg = (void *)pkt->data;
 
        mvm->shared_mem_cfg.shared_mem_addr =
@@ -473,7 +467,6 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
                le32_to_cpu(mem_cfg->page_buff_size);
        IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
 
-exit:
        iwl_free_resp(&cmd);
 }
 
@@ -676,8 +669,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
                goto error;
        }
 
-       if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10)
-               iwl_mvm_get_shared_mem_conf(mvm);
+       iwl_mvm_get_shared_mem_conf(mvm);
 
        ret = iwl_mvm_sf_update(mvm, NULL, false);
        if (ret)
@@ -760,6 +752,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
                        goto error;
        }
 
+       if (iwl_mvm_is_csum_supported(mvm) &&
+           mvm->cfg->features & NETIF_F_RXCSUM)
+               iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);
+
        /* allow FW/transport low power modes if not during restart */
        if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
                iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
@@ -815,9 +811,8 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
        return ret;
 }
 
-int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+                                struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
@@ -828,13 +823,10 @@ int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
                          (flags & SW_CARD_DISABLED) ? "Kill" : "On",
                          (flags & CT_KILL_CARD_DISABLED) ?
                          "Reached" : "Not reached");
-
-       return 0;
 }
 
-int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
-                           struct iwl_rx_cmd_buffer *rxb,
-                           struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
+                            struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
@@ -845,5 +837,4 @@ int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
                       le32_to_cpu(mfuart_notif->external_ver),
                       le32_to_cpu(mfuart_notif->status),
                       le32_to_cpu(mfuart_notif->duration));
-       return 0;
 }
index 1812dd018af27628bdc87bfae780e3de3735bea3..3424315dd876de13f9a64da6b7703f51103ce87e 100644 (file)
@@ -1312,9 +1312,8 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
        }
 }
 
-int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
-                           struct iwl_rx_cmd_buffer *rxb,
-                           struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
+                            struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
@@ -1365,8 +1364,6 @@ int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
                        RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
                }
        }
-
-       return 0;
 }
 
 static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
@@ -1415,9 +1412,8 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
                iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL);
 }
 
-int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+                                    struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
@@ -1434,5 +1430,4 @@ int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
                                                   IEEE80211_IFACE_ITER_NORMAL,
                                                   iwl_mvm_beacon_loss_iterator,
                                                   mb);
-       return 0;
 }
index dfdab38e2d4ad5d84d8a4b30b51cfb846e44c179..9e641847c0472a12ced4d44bc994e51706e3e3bb 100644 (file)
@@ -649,6 +649,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
        }
 
+       hw->netdev_features |= mvm->cfg->features;
+       if (!iwl_mvm_is_csum_supported(mvm))
+               hw->netdev_features &= ~NETIF_F_RXCSUM;
+
        ret = ieee80211_register_hw(mvm->hw);
        if (ret)
                iwl_mvm_leds_exit(mvm);
@@ -1433,22 +1437,9 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
 
 static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
 {
-       bool exit_now;
-
        if (!iwl_mvm_is_d0i3_supported(mvm))
                return;
 
-       mutex_lock(&mvm->d0i3_suspend_mutex);
-       __clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
-       exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
-                                       &mvm->d0i3_suspend_flags);
-       mutex_unlock(&mvm->d0i3_suspend_mutex);
-
-       if (exit_now) {
-               IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
-               _iwl_mvm_exit_d0i3(mvm);
-       }
-
        if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND)
                if (!wait_event_timeout(mvm->d0i3_exit_waitq,
                                        !test_bit(IWL_MVM_STATUS_IN_D0I3,
@@ -1664,6 +1655,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
                goto out_unlock;
        }
 
+       mvmvif->features |= hw->netdev_features;
+
        ret = iwl_mvm_mac_ctxt_add(mvm, vif);
        if (ret)
                goto out_release;
@@ -2880,10 +2873,11 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
        switch (key->cipher) {
        case WLAN_CIPHER_SUITE_TKIP:
                key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
-               /* fall-through */
-       case WLAN_CIPHER_SUITE_CCMP:
                key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
                break;
+       case WLAN_CIPHER_SUITE_CCMP:
+               key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+               break;
        case WLAN_CIPHER_SUITE_AES_CMAC:
                WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
                break;
@@ -3025,7 +3019,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
        int res, time_reg = DEVICE_SYSTEM_TIME_REG;
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
-       static const u8 time_event_response[] = { HOT_SPOT_CMD };
+       static const u16 time_event_response[] = { HOT_SPOT_CMD };
        struct iwl_notification_wait wait_time_event;
        struct iwl_hs20_roc_req aux_roc_req = {
                .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
index 605f57a2c6be4c76f07a36cd99ccbc91d81f44c4..6b0956a9129a5aec7966770770db693ec453780c 100644 (file)
@@ -80,6 +80,7 @@
 #include "sta.h"
 #include "fw-api.h"
 #include "constants.h"
+#include "tof.h"
 
 #define IWL_INVALID_MAC80211_QUEUE     0xff
 #define IWL_MVM_MAX_ADDRESSES          5
@@ -122,8 +123,7 @@ extern const struct ieee80211_ops iwl_mvm_hw_ops;
  *     be up'ed after the INIT fw asserted. This is useful to be able to use
  *     proprietary tools over testmode to debug the INIT fw.
  * @tfd_q_hang_detect: enabled the detection of hung transmit queues
- * @power_scheme: CAM(Continuous Active Mode)-1, BPS(Balanced Power
- *     Save)-2(default), LP(Low Power)-3
+ * @power_scheme: one of enum iwl_power_scheme
  */
 struct iwl_mvm_mod_params {
        bool init_dbg;
@@ -357,6 +357,7 @@ struct iwl_mvm_vif_bf_data {
  *     # of received beacons accumulated over FW restart, and the current
  *     average signal of beacons retrieved from the firmware
  * @csa_failed: CSA failed to schedule time event, report an error later
+ * @features: hw features active for this vif
  */
 struct iwl_mvm_vif {
        struct iwl_mvm *mvm;
@@ -437,6 +438,9 @@ struct iwl_mvm_vif {
        /* Indicates that CSA countdown may be started */
        bool csa_countdown;
        bool csa_failed;
+
+       /* TCP Checksum Offload */
+       netdev_features_t features;
 };
 
 static inline struct iwl_mvm_vif *
@@ -687,6 +691,7 @@ struct iwl_mvm {
         * can hold 16 keys at most. Reflect this fact.
         */
        unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
+       u8 fw_key_deleted[STA_KEY_MAX_NUM];
 
        /* references taken by the driver and spinlock protecting them */
        spinlock_t refs_lock;
@@ -823,6 +828,7 @@ struct iwl_mvm {
        struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
 
        u32 ciphers[6];
+       struct iwl_mvm_tof_data tof_data;
 };
 
 /* Extract MVM priv from op_mode and _hw */
@@ -942,6 +948,12 @@ static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
                IWL_MVM_BT_COEX_RRC;
 }
 
+static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm)
+{
+       return fw_has_capa(&mvm->fw->ucode_capa,
+                          IWL_UCODE_TLV_CAPA_CSUM_SUPPORT);
+}
+
 extern const u8 iwl_mvm_ac_to_tx_fifo[];
 
 struct iwl_rate_info {
@@ -975,12 +987,12 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
 /* Tx / Host Commands */
 int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
                                  struct iwl_host_cmd *cmd);
-int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
+int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
                                      u32 flags, u16 len, const void *data);
 int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
                                         struct iwl_host_cmd *cmd,
                                         u32 *status);
-int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id,
+int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
                                             u16 len, const void *data,
                                             u32 *status);
 int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
@@ -989,10 +1001,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
 void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
                        struct iwl_tx_cmd *tx_cmd,
                        struct ieee80211_tx_info *info, u8 sta_id);
-void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
-                              struct ieee80211_tx_info *info,
-                              struct iwl_tx_cmd *tx_cmd,
-                              struct sk_buff *skb_frag);
 void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
                            struct ieee80211_tx_info *info,
                            struct ieee80211_sta *sta, __le16 fc);
@@ -1004,6 +1012,17 @@ static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
 int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
 
+static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
+                                          struct iwl_tx_cmd *tx_cmd)
+{
+       struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+       tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+       memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+       if (info->flags & IEEE80211_TX_CTL_AMPDU)
+               tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
+}
+
 static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
 {
        flush_work(&mvm->async_handlers_wk);
@@ -1012,9 +1031,8 @@ static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
 /* Statistics */
 void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
                                  struct iwl_rx_packet *pkt);
-int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
-                         struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
+                          struct iwl_rx_cmd_buffer *rxb);
 int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear);
 void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
 
@@ -1060,27 +1078,19 @@ bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
  * FW notifications / CMD responses handlers
  * Convention: iwl_mvm_rx_<NAME OF THE CMD>
  */
-int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                      struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                     struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                       struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
-                                 struct iwl_rx_cmd_buffer *rxb,
-                                 struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                           struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+                                  struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+                                struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
+                            struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
+                                    struct iwl_rx_cmd_buffer *rxb);
 
 /* MVM PHY */
 int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
@@ -1107,12 +1117,10 @@ int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif);
 int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
                                    struct ieee80211_vif *vif);
-int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
-                           struct iwl_rx_cmd_buffer *rxb,
-                           struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
-                                   struct iwl_rx_cmd_buffer *rxb,
-                                   struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
+                            struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+                                    struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
                                    struct ieee80211_vif *vif);
 unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
@@ -1136,29 +1144,24 @@ int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
 void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
 
 /* Scheduled scan */
-int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
-                                       struct iwl_rx_cmd_buffer *rxb,
-                                       struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+                                        struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+                                             struct iwl_rx_cmd_buffer *rxb);
 int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
                             struct ieee80211_vif *vif,
                             struct cfg80211_sched_scan_request *req,
                             struct ieee80211_scan_ies *ies,
                             int type);
-int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+                                struct iwl_rx_cmd_buffer *rxb);
 
 /* UMAC scan */
 int iwl_mvm_config_scan(struct iwl_mvm *mvm);
-int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
-                                       struct iwl_rx_cmd_buffer *rxb,
-                                       struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+                                        struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+                                             struct iwl_rx_cmd_buffer *rxb);
 
 /* MVM debugfs */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1197,9 +1200,8 @@ int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                 char *buf, int bufsz);
 
 void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd);
+void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+                                             struct iwl_rx_cmd_buffer *rxb);
 
 #ifdef CONFIG_IWLWIFI_LEDS
 int iwl_mvm_leds_init(struct iwl_mvm *mvm);
@@ -1255,9 +1257,8 @@ int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
 
 /* BT Coex */
 int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
-int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
-                            struct iwl_rx_cmd_buffer *rxb,
-                            struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+                             struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                           enum ieee80211_rssi_event_data);
 void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
@@ -1275,9 +1276,8 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
 bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
 void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
 int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
-int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
-                                struct iwl_rx_cmd_buffer *rxb,
-                                struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
+                                 struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                               enum ieee80211_rssi_event_data);
 u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
@@ -1286,9 +1286,8 @@ bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
                                         struct ieee80211_sta *sta);
 bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
                                        enum ieee80211_band band);
-int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
-                                     struct iwl_rx_cmd_buffer *rxb,
-                                     struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
+                                      struct iwl_rx_cmd_buffer *rxb);
 
 /* beacon filtering */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1377,9 +1376,8 @@ static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
-int iwl_mvm_temp_notif(struct iwl_mvm *mvm,
-                      struct iwl_rx_cmd_buffer *rxb,
-                      struct iwl_device_cmd *cmd);
+void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
+                       struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
 void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
 void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
@@ -1391,9 +1389,8 @@ struct iwl_mcc_update_resp *
 iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
                   enum iwl_mcc_source src_id);
 int iwl_mvm_init_mcc(struct iwl_mvm *mvm);
-int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
-                              struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+                               struct iwl_rx_cmd_buffer *rxb);
 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
                                                  const char *alpha2,
                                                  enum iwl_mcc_source src_id,
@@ -1432,8 +1429,7 @@ void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
 void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
                                        struct ieee80211_vif *vif,
                                        struct ieee80211_sta *sta);
-int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
 
 struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
index 2a6be350704a9442245f6e8bd2391503f67f0c6d..328187da7541dff2233c1f020c559522e7c4f8a7 100644 (file)
@@ -139,12 +139,6 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
                return ret;
 
        pkt = cmd.resp_pkt;
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(mvm, "Bad return from NVM_ACCES_COMMAND (0x%08X)\n",
-                       pkt->hdr.flags);
-               ret = -EIO;
-               goto exit;
-       }
 
        /* Extract NVM response */
        nvm_resp = (void *)pkt->data;
@@ -652,12 +646,6 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
                return ERR_PTR(ret);
 
        pkt = cmd.resp_pkt;
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(mvm, "Bad return from MCC_UPDATE_COMMAND (0x%08X)\n",
-                       pkt->hdr.flags);
-               ret = -EIO;
-               goto exit;
-       }
 
        /* Extract MCC response */
        mcc_resp = (void *)pkt->data;
@@ -839,9 +827,8 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
        return retval;
 }
 
-int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
-                              struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+                               struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
@@ -852,7 +839,7 @@ int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
        lockdep_assert_held(&mvm->mutex);
 
        if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
-               return 0;
+               return;
 
        mcc[0] = notif->mcc >> 8;
        mcc[1] = notif->mcc & 0xff;
@@ -864,10 +851,8 @@ int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
                      mcc, src);
        regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL);
        if (IS_ERR_OR_NULL(regd))
-               return 0;
+               return;
 
        regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
        kfree(regd);
-
-       return 0;
 }
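
The conversion of iwl_mvm_rx_chub_update_mcc() just above is the template for every RX handler touched in this series: the int return value was computed and then discarded by the dispatcher, so the prototype shrinks to void and each early "return 0;" becomes a bare return. A minimal sketch of the pattern, with a hypothetical handler (foo_supported()/foo_handle() are illustrative names, not driver functions):

    /* before: a status value that every caller ignored */
    int iwl_mvm_rx_foo_notif(struct iwl_mvm *mvm,
                             struct iwl_rx_cmd_buffer *rxb,
                             struct iwl_device_cmd *cmd)
    {
            if (!foo_supported(mvm))        /* hypothetical guard */
                    return 0;
            foo_handle(mvm, rxb_addr(rxb)); /* hypothetical work */
            return 0;
    }

    /* after: same control flow, no dead return value, no unused cmd */
    void iwl_mvm_rx_foo_notif(struct iwl_mvm *mvm,
                              struct iwl_rx_cmd_buffer *rxb)
    {
            if (!foo_supported(mvm))
                    return;
            foo_handle(mvm, rxb_addr(rxb));
    }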
index 3967df63e0f3822a53b0d120cfc811773e5f44c0..c8327f1a077a7518eddbe859bacc94d0a0bfd281 100644 (file)
@@ -201,14 +201,15 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
 }
 
 struct iwl_rx_handlers {
-       u8 cmd_id;
+       u16 cmd_id;
        bool async;
-       int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                 struct iwl_device_cmd *cmd);
+       void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 };
 
 #define RX_HANDLER(_cmd_id, _fn, _async)       \
        { .cmd_id = _cmd_id , .fn = _fn , .async = _async }
+#define RX_HANDLER_GRP(_grp, _cmd, _fn, _async)        \
+       { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .async = _async }
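
RX_HANDLER_GRP keys the table on a 16-bit wide ID built from a group and an opcode. WIDE_ID itself is defined in a transport header outside this diff; the packing assumed throughout these hunks is group in the high byte, opcode in the low byte, roughly:

    /* assumed definition -- the real macro lives outside this diff */
    #define WIDE_ID(grp, opcode)    (((grp) << 8) | (opcode))

Legacy commands sit in group 0, so a plain u8 command ID compares equal to its wide form and the existing RX_HANDLER() entries keep working unchanged.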
 
 /*
  * Handlers for fw notifications
@@ -221,7 +222,6 @@ struct iwl_rx_handlers {
  * called from a worker with mvm->mutex held.
  */
 static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
-       RX_HANDLER(REPLY_RX_MPDU_CMD, iwl_mvm_rx_rx_mpdu, false),
        RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false),
        RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
        RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
@@ -261,9 +261,11 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
        RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
                   true),
        RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false),
+       RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true),
 
 };
 #undef RX_HANDLER
+#undef RX_HANDLER_GRP
 #define CMD(x) [x] = #x
 
 static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
@@ -470,6 +472,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
        trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
        trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
+       trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa,
+                                              IWL_UCODE_TLV_API_WIDE_CMD_HDR);
 
        if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE)
                trans_cfg.bc_table_dword = true;
@@ -576,6 +580,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        /* rpm starts with a taken ref. only set the appropriate bit here. */
        mvm->refs[IWL_MVM_REF_UCODE_DOWN] = 1;
 
+       iwl_mvm_tof_init(mvm);
+
        return op_mode;
 
  out_unregister:
@@ -623,14 +629,15 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
        for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
                kfree(mvm->nvm_sections[i].data);
 
+       iwl_mvm_tof_clean(mvm);
+
        ieee80211_free_hw(mvm->hw);
 }
 
 struct iwl_async_handler_entry {
        struct list_head list;
        struct iwl_rx_cmd_buffer rxb;
-       int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                 struct iwl_device_cmd *cmd);
+       void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 };
 
 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
@@ -667,9 +674,7 @@ static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
        spin_unlock_bh(&mvm->async_handlers_lock);
 
        list_for_each_entry_safe(entry, tmp, &local_list, list) {
-               if (entry->fn(mvm, &entry->rxb, NULL))
-                       IWL_WARN(mvm,
-                                "returned value from ASYNC handlers are ignored\n");
+               entry->fn(mvm, &entry->rxb);
                iwl_free_rxb(&entry->rxb);
                list_del(&entry->list);
                kfree(entry);
@@ -698,24 +703,29 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
                if (!cmds_trig->cmds[i].cmd_id)
                        break;
 
-               if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd)
+               if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
+                   cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
                        continue;
 
                iwl_mvm_fw_dbg_collect_trig(mvm, trig,
-                                           "CMD 0x%02x received",
-                                           pkt->hdr.cmd);
+                                           "CMD 0x%02x.%02x received",
+                                           pkt->hdr.group_id, pkt->hdr.cmd);
                break;
        }
 }
 
-static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
-                              struct iwl_rx_cmd_buffer *rxb,
-                              struct iwl_device_cmd *cmd)
+static void iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
+                               struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
        u8 i;
 
+       if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) {
+               iwl_mvm_rx_rx_mpdu(mvm, rxb);
+               return;
+       }
+
        iwl_mvm_rx_check_trigger(mvm, pkt);
 
        /*
@@ -729,16 +739,18 @@ static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
                const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
                struct iwl_async_handler_entry *entry;
 
-               if (rx_h->cmd_id != pkt->hdr.cmd)
+               if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
                        continue;
 
-               if (!rx_h->async)
-                       return rx_h->fn(mvm, rxb, cmd);
+               if (!rx_h->async) {
+                       rx_h->fn(mvm, rxb);
+                       return;
+               }
 
                entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
                /* we can't do much... */
                if (!entry)
-                       return 0;
+                       return;
 
                entry->rxb._page = rxb_steal_page(rxb);
                entry->rxb._offset = rxb->_offset;
@@ -750,8 +762,6 @@ static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
                schedule_work(&mvm->async_handlers_wk);
                break;
        }
-
-       return 0;
 }
 
 static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
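
Two structural changes land in iwl_mvm_rx_dispatch() above. First, REPLY_RX_MPDU_CMD -- the bulk data-path notification -- is matched up front and handled inline, which is why it disappears from the iwl_mvm_rx_handlers[] table: received frames no longer pay for a table walk. Second, the table lookup now compares against the full (group_id, cmd) pair. Async handlers still steal the RX page and defer to the mutex-holding worker, whose old "returned value ... ignored" warning becomes moot with void handlers. Condensed control flow, assuming the WIDE_ID packing noted earlier (queue_async() is a hypothetical stand-in for the kzalloc/steal-page/schedule_work sequence in the hunk):

    static void rx_dispatch_sketch(struct iwl_mvm *mvm,
                                   struct iwl_rx_cmd_buffer *rxb)
    {
            struct iwl_rx_packet *pkt = rxb_addr(rxb);
            int i;

            /* hot path: data frames skip the handler table entirely */
            if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) {
                    iwl_mvm_rx_rx_mpdu(mvm, rxb);
                    return;
            }

            for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
                    const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];

                    if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
                            continue;
                    if (rx_h->async)
                            queue_async(mvm, rx_h, rxb);    /* hypothetical helper */
                    else
                            rx_h->fn(mvm, rxb);             /* run in place */
                    break;
            }
    }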
index d2c6ba9d326b4656b8f6e7007554fb3a5ba8e681..506294fc2f878c037464112c307be2bb202deda6 100644 (file)
@@ -112,11 +112,12 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
 static
 void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
                                          struct ieee80211_vif *vif,
-                                         struct iwl_beacon_filter_cmd *cmd)
+                                         struct iwl_beacon_filter_cmd *cmd,
+                                         bool d0i3)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-       if (vif->bss_conf.cqm_rssi_thold) {
+       if (vif->bss_conf.cqm_rssi_thold && !d0i3) {
                cmd->bf_energy_delta =
                        cpu_to_le32(vif->bss_conf.cqm_rssi_hyst);
                /* fw uses an absolute value for this */
@@ -509,9 +510,8 @@ static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac,
                       ETH_ALEN);
 }
 
-int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd)
+void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+                                             struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;
@@ -520,8 +520,6 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
        ieee80211_iterate_active_interfaces_atomic(
                mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
                iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id);
-
-       return 0;
 }
 
 struct iwl_power_vifs {
@@ -810,7 +808,7 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
            vif->type != NL80211_IFTYPE_STATION || vif->p2p)
                return 0;
 
-       iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd);
+       iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd, d0i3);
        if (!d0i3)
                iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
        ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
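
iwl_mvm_beacon_filter_set_cqm_params() now receives the d0i3 flag so the CQM RSSI thresholds are programmed only in the normal configuration:

    /* assumed rationale, not stated in the hunk: while in D0i3 the
     * host cannot service CQM events, so bf_energy_delta and
     * bf_roaming_state keep their defaults and the firmware is not
     * asked to track the CQM RSSI threshold.
     */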
index daff1d0a8e4adad6ebf2cdc5e811411776d63913..19a79262e0a0ea98c80139188ccac09f07b1ff87 100644 (file)
@@ -2403,7 +2403,7 @@ struct rs_init_rate_info {
        u8 rate_idx;
 };
 
-static const struct rs_init_rate_info rs_init_rates_24ghz[] = {
+static const struct rs_init_rate_info rs_optimal_rates_24ghz_legacy[] = {
        { -60, IWL_RATE_54M_INDEX },
        { -64, IWL_RATE_48M_INDEX },
        { -68, IWL_RATE_36M_INDEX },
@@ -2416,7 +2416,7 @@ static const struct rs_init_rate_info rs_init_rates_24ghz[] = {
        { S8_MIN, IWL_RATE_1M_INDEX },
 };
 
-static const struct rs_init_rate_info rs_init_rates_5ghz[] = {
+static const struct rs_init_rate_info rs_optimal_rates_5ghz_legacy[] = {
        { -60, IWL_RATE_54M_INDEX },
        { -64, IWL_RATE_48M_INDEX },
        { -72, IWL_RATE_36M_INDEX },
@@ -2427,6 +2427,124 @@ static const struct rs_init_rate_info rs_init_rates_5ghz[] = {
        { S8_MIN, IWL_RATE_6M_INDEX },
 };
 
+static const struct rs_init_rate_info rs_optimal_rates_ht[] = {
+       { -60, IWL_RATE_MCS_7_INDEX },
+       { -64, IWL_RATE_MCS_6_INDEX },
+       { -68, IWL_RATE_MCS_5_INDEX },
+       { -72, IWL_RATE_MCS_4_INDEX },
+       { -80, IWL_RATE_MCS_3_INDEX },
+       { -84, IWL_RATE_MCS_2_INDEX },
+       { -85, IWL_RATE_MCS_1_INDEX },
+       { S8_MIN, IWL_RATE_MCS_0_INDEX},
+};
+
+static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = {
+       { -60, IWL_RATE_MCS_8_INDEX },
+       { -64, IWL_RATE_MCS_7_INDEX },
+       { -68, IWL_RATE_MCS_6_INDEX },
+       { -72, IWL_RATE_MCS_5_INDEX },
+       { -80, IWL_RATE_MCS_4_INDEX },
+       { -84, IWL_RATE_MCS_3_INDEX },
+       { -85, IWL_RATE_MCS_2_INDEX },
+       { -87, IWL_RATE_MCS_1_INDEX },
+       { S8_MIN, IWL_RATE_MCS_0_INDEX},
+};
+
+static const struct rs_init_rate_info rs_optimal_rates_vht_40_80mhz[] = {
+       { -60, IWL_RATE_MCS_9_INDEX },
+       { -64, IWL_RATE_MCS_8_INDEX },
+       { -68, IWL_RATE_MCS_7_INDEX },
+       { -72, IWL_RATE_MCS_6_INDEX },
+       { -80, IWL_RATE_MCS_5_INDEX },
+       { -84, IWL_RATE_MCS_4_INDEX },
+       { -85, IWL_RATE_MCS_3_INDEX },
+       { -87, IWL_RATE_MCS_2_INDEX },
+       { -88, IWL_RATE_MCS_1_INDEX },
+       { S8_MIN, IWL_RATE_MCS_0_INDEX },
+};
+
+/* Init the optimal rate based on STA caps.
+ * This, combined with RSSI, is used to report the last tx rate
+ * to userspace when we haven't transmitted enough frames.
+ */
+static void rs_init_optimal_rate(struct iwl_mvm *mvm,
+                                struct ieee80211_sta *sta,
+                                struct iwl_lq_sta *lq_sta)
+{
+       struct rs_rate *rate = &lq_sta->optimal_rate;
+
+       if (lq_sta->max_mimo2_rate_idx != IWL_RATE_INVALID)
+               rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
+       else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID)
+               rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
+       else if (lq_sta->band == IEEE80211_BAND_5GHZ)
+               rate->type = LQ_LEGACY_A;
+       else
+               rate->type = LQ_LEGACY_G;
+
+       rate->bw = rs_bw_from_sta_bw(sta);
+       rate->sgi = rs_sgi_allow(mvm, sta, rate, NULL);
+
+       /* ANT/LDPC/STBC aren't relevant for the rate reported to userspace */
+
+       if (is_mimo(rate)) {
+               lq_sta->optimal_rate_mask = lq_sta->active_mimo2_rate;
+       } else if (is_siso(rate)) {
+               lq_sta->optimal_rate_mask = lq_sta->active_siso_rate;
+       } else {
+               lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate;
+
+               if (lq_sta->band == IEEE80211_BAND_5GHZ) {
+                       lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy;
+                       lq_sta->optimal_nentries =
+                               ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
+               } else {
+                       lq_sta->optimal_rates = rs_optimal_rates_24ghz_legacy;
+                       lq_sta->optimal_nentries =
+                               ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
+               }
+       }
+
+       if (is_vht(rate)) {
+               if (rate->bw == RATE_MCS_CHAN_WIDTH_20) {
+                       lq_sta->optimal_rates = rs_optimal_rates_vht_20mhz;
+                       lq_sta->optimal_nentries =
+                               ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
+               } else {
+                       lq_sta->optimal_rates = rs_optimal_rates_vht_40_80mhz;
+                       lq_sta->optimal_nentries =
+                               ARRAY_SIZE(rs_optimal_rates_vht_40_80mhz);
+               }
+       } else if (is_ht(rate)) {
+               lq_sta->optimal_rates = rs_optimal_rates_ht;
+               lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_ht);
+       }
+}
+
+/* Compute the optimal rate index based on RSSI */
+static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm,
+                                          struct iwl_lq_sta *lq_sta)
+{
+       struct rs_rate *rate = &lq_sta->optimal_rate;
+       int i;
+
+       rate->index = find_first_bit(&lq_sta->optimal_rate_mask,
+                                    BITS_PER_LONG);
+
+       for (i = 0; i < lq_sta->optimal_nentries; i++) {
+               int rate_idx = lq_sta->optimal_rates[i].rate_idx;
+
+               if ((lq_sta->pers.last_rssi >= lq_sta->optimal_rates[i].rssi) &&
+                   (BIT(rate_idx) & lq_sta->optimal_rate_mask)) {
+                       rate->index = rate_idx;
+                       break;
+               }
+       }
+
+       rs_dump_rate(mvm, rate, "OPTIMAL RATE");
+       return rate;
+}
+
 /* Choose an initial legacy rate and antenna to use based on the RSSI
  * of last Rx
  */
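
To make the lookup concrete: rs_get_optimal_rate() walks its table top-down and takes the first entry whose RSSI threshold the station meets, restricted to rates set in optimal_rate_mask. The pers.last_rssi input, per the rs_update_last_rssi() hunk further down, is the strongest per-chain signal of the most recent RX (chains at -72 and -68 dBm yield -68).

    /* worked example: VHT station on an 80 MHz channel,
     * pers.last_rssi == -70 dBm, table rs_optimal_rates_vht_40_80mhz:
     *
     *   -70 >= -60 ? no   (MCS 9)
     *   -70 >= -64 ? no   (MCS 8)
     *   -70 >= -68 ? no   (MCS 7)
     *   -70 >= -72 ? yes  -> IWL_RATE_MCS_6_INDEX, if its bit is set
     *                        in optimal_rate_mask; otherwise the
     *                        find_first_bit() fallback applies.
     */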
@@ -2468,12 +2586,12 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
 
        if (band == IEEE80211_BAND_5GHZ) {
                rate->type = LQ_LEGACY_A;
-               initial_rates = rs_init_rates_5ghz;
-               nentries = ARRAY_SIZE(rs_init_rates_5ghz);
+               initial_rates = rs_optimal_rates_5ghz_legacy;
+               nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
        } else {
                rate->type = LQ_LEGACY_G;
-               initial_rates = rs_init_rates_24ghz;
-               nentries = ARRAY_SIZE(rs_init_rates_24ghz);
+               initial_rates = rs_optimal_rates_24ghz_legacy;
+               nentries = ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
        }
 
        if (IWL_MVM_RS_RSSI_BASED_INIT_RATE) {
@@ -2496,10 +2614,21 @@ void rs_update_last_rssi(struct iwl_mvm *mvm,
                         struct iwl_lq_sta *lq_sta,
                         struct ieee80211_rx_status *rx_status)
 {
+       int i;
+
        lq_sta->pers.chains = rx_status->chains;
        lq_sta->pers.chain_signal[0] = rx_status->chain_signal[0];
        lq_sta->pers.chain_signal[1] = rx_status->chain_signal[1];
        lq_sta->pers.chain_signal[2] = rx_status->chain_signal[2];
+       lq_sta->pers.last_rssi = S8_MIN;
+
+       for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) {
+               if (!(lq_sta->pers.chains & BIT(i)))
+                       continue;
+
+               if (lq_sta->pers.chain_signal[i] > lq_sta->pers.last_rssi)
+                       lq_sta->pers.last_rssi = lq_sta->pers.chain_signal[i];
+       }
 }
 
 /**
@@ -2538,6 +2667,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
        rate = &tbl->rate;
 
        rs_get_initial_rate(mvm, lq_sta, band, rate);
+       rs_init_optimal_rate(mvm, sta, lq_sta);
 
        WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
        if (rate->ant == ANT_A)
@@ -2560,6 +2690,8 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
        struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct iwl_lq_sta *lq_sta = mvm_sta;
+       struct rs_rate *optimal_rate;
+       u32 last_ucode_rate;
 
        if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) {
                /* if vif isn't initialized mvm doesn't know about
@@ -2583,8 +2715,18 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
 
        iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
                                  info->band, &info->control.rates[0]);
-
        info->control.rates[0].count = 1;
+
+       /* Report the optimal rate based on RSSI and STA caps if we haven't
+        * converged yet (too little traffic) or are exploring other modulations.
+        */
+       if (lq_sta->rs_state != RS_STATE_STAY_IN_COLUMN) {
+               optimal_rate = rs_get_optimal_rate(mvm, lq_sta);
+               last_ucode_rate = ucode_rate_from_rs_rate(mvm,
+                                                         optimal_rate);
+               iwl_mvm_hwrate_to_tx_rate(last_ucode_rate, info->band,
+                                         &txrc->reported_rate);
+       }
 }
 
 static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
@@ -2605,6 +2747,7 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
 #endif
        lq_sta->pers.chains = 0;
        memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
+       lq_sta->pers.last_rssi = S8_MIN;
 
        return &sta_priv->lq_sta;
 }
index 2a3da314305ab548e3c72c7a5479e6e666be5184..81314ad9ebe09a9b069f339958a33129c05c4988 100644 (file)
@@ -1,6 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -316,6 +317,14 @@ struct iwl_lq_sta {
        u8 max_siso_rate_idx;
        u8 max_mimo2_rate_idx;
 
+       /* Optimal rate based on RSSI and STA caps.
+        * Used only to reflect link speed to userspace.
+        */
+       struct rs_rate optimal_rate;
+       unsigned long optimal_rate_mask;
+       const struct rs_init_rate_info *optimal_rates;
+       int optimal_nentries;
+
        u8 missed_rate_counter;
 
        struct iwl_lq_cmd lq;
@@ -341,6 +350,7 @@ struct iwl_lq_sta {
 #endif
                u8 chains;
                s8 chain_signal[IEEE80211_MAX_CHAINS];
+               s8 last_rssi;
                struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
                struct iwl_mvm *drv;
        } pers;
index 9ff0b4321df3b051d5e1ec3576e73036b578f53e..a0c27cc19759015c1148c97880755f9f0e4c0d89 100644 (file)
@@ -61,6 +61,7 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *****************************************************************************/
+#include <linux/skbuff.h>
 #include "iwl-trans.h"
 #include "mvm.h"
 #include "fw-api.h"
@@ -71,8 +72,7 @@
  * Copies the phy information in mvm->last_phy_info, it will be used when the
  * actual data will come from the fw in the next packet.
  */
-int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
@@ -86,8 +86,6 @@ int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                spin_unlock(&mvm->drv_stats_lock);
        }
 #endif
-
-       return 0;
 }
 
 /*
@@ -237,13 +235,25 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
        return 0;
 }
 
+static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
+                           struct sk_buff *skb,
+                           u32 status)
+{
+       struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+       if (mvmvif->features & NETIF_F_RXCSUM &&
+           status & RX_MPDU_RES_STATUS_CSUM_DONE &&
+           status & RX_MPDU_RES_STATUS_CSUM_OK)
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
 /*
  * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler
  *
  * Handles the actual data of the Rx packet from the fw
  */
-int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                      struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct ieee80211_hdr *hdr;
        struct ieee80211_rx_status *rx_status;
@@ -271,7 +281,7 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        skb = alloc_skb(128, GFP_ATOMIC);
        if (!skb) {
                IWL_ERR(mvm, "alloc_skb failed\n");
-               return 0;
+               return;
        }
 
        rx_status = IEEE80211_SKB_RXCB(skb);
@@ -284,14 +294,14 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
                               rx_pkt_status);
                kfree_skb(skb);
-               return 0;
+               return;
        }
 
        if ((unlikely(phy_info->cfg_phy_cnt > 20))) {
                IWL_DEBUG_DROP(mvm, "dsp size out of range [0,20]: %d\n",
                               phy_info->cfg_phy_cnt);
                kfree_skb(skb);
-               return 0;
+               return;
        }
 
        /*
@@ -366,6 +376,9 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                }
        }
 
+       if (sta && ieee80211_is_data(hdr->frame_control))
+               iwl_mvm_rx_csum(sta, skb, rx_pkt_status);
+
        rcu_read_unlock();
 
        /* set the preamble flag if appropriate */
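
The new iwl_mvm_rx_csum() gates hardware checksum offload for data frames on three conditions: the receiving vif advertises NETIF_F_RXCSUM, and the firmware status word carries both bits of the offload contract sketched below.

    /* offload contract assumed by iwl_mvm_rx_csum():
     *
     *   RX_MPDU_RES_STATUS_CSUM_DONE - hw attempted TCP/UDP validation
     *   RX_MPDU_RES_STATUS_CSUM_OK   - and the checksum verified
     *
     * Only the conjunction flips skb->ip_summed to CHECKSUM_UNNECESSARY;
     * otherwise it stays at CHECKSUM_NONE (the alloc_skb() default) and
     * the network stack verifies in software.
     */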
@@ -431,7 +444,6 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
 #endif
        iwl_mvm_pass_packet_to_mac80211(mvm, skb, hdr, len, ampdu_status,
                                        crypt_len, rxb);
-       return 0;
 }
 
 static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
@@ -623,10 +635,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
                iwl_rx_packet_payload_len(pkt));
 }
 
-int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
-                         struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
-       return 0;
 }
index 5de144968723d4f2a5446f0d6f9fc0607baf2efb..95678e773c6ff0bf188624392c8b7f0a54f86cd0 100644 (file)
@@ -90,11 +90,9 @@ struct iwl_mvm_scan_params {
        int n_match_sets;
        struct iwl_scan_probe_req preq;
        struct cfg80211_match_set *match_sets;
-       struct _dwell {
-               u16 passive;
-               u16 active;
-               u16 fragmented;
-       } dwell[IEEE80211_NUM_BANDS];
+       u16 passive_dwell;
+       u16 active_dwell;
+       u16 fragmented_dwell;
        struct {
                u8 iterations;
                u8 full_scan_mul; /* not used for UMAC */
@@ -147,34 +145,6 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
                return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
 }
 
-/*
- * If req->n_ssids > 0, it means we should do an active scan.
- * In case of active scan w/o directed scan, we receive a zero-length SSID
- * just to notify that this scan is active and not passive.
- * In order to notify the FW of the number of SSIDs we wish to scan (including
- * the zero-length one), we need to set the corresponding bits in chan->type,
- * one for each SSID, and set the active bit (first). If the first SSID is
- * already included in the probe template, so we need to set only
- * req->n_ssids - 1 bits in addition to the first bit.
- */
-static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
-                                   enum ieee80211_band band, int n_ssids)
-{
-       if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
-               return 10;
-       if (band == IEEE80211_BAND_2GHZ)
-               return 20  + 3 * (n_ssids + 1);
-       return 10  + 2 * (n_ssids + 1);
-}
-
-static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
-                                    enum ieee80211_band band)
-{
-       if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
-                       return 110;
-       return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
-}
-
 static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
                                            struct ieee80211_vif *vif)
 {
@@ -191,7 +161,6 @@ static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm,
                                    struct iwl_mvm_scan_params *params)
 {
        int global_cnt = 0;
-       enum ieee80211_band band;
        u8 frag_passive_dwell = 0;
 
        ieee80211_iterate_active_interfaces_atomic(mvm->hw,
@@ -227,14 +196,10 @@ static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm,
                /*
                 * P2P device scan should not be fragmented to avoid negative
                 * impact on P2P device discovery. Configure max_out_time to be
-                * equal to dwell time on passive channel. Take a longest
-                * possible value, one that corresponds to 2GHz band
+                * equal to dwell time on passive channel.
                 */
                if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
-                       u32 passive_dwell =
-                               iwl_mvm_get_passive_dwell(mvm,
-                                                         IEEE80211_BAND_2GHZ);
-                       params->max_out_time = passive_dwell;
+                       params->max_out_time = 120;
                } else {
                        params->passive_fragmented = true;
                }
@@ -246,30 +211,21 @@ static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm,
 
 not_bound:
 
-       for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
-               if (params->passive_fragmented)
-                       params->dwell[band].fragmented = frag_passive_dwell;
+       if (params->passive_fragmented)
+               params->fragmented_dwell = frag_passive_dwell;
+
+       /*
+        * Use only the basic dwell time in the scan command, regardless of the
+        * band or the number of probes. FW will calculate the actual dwell time.
+        */
+       params->passive_dwell = 110;
+       params->active_dwell = 10;
 
-               params->dwell[band].passive = iwl_mvm_get_passive_dwell(mvm,
-                                                                       band);
-               params->dwell[band].active =
-                       iwl_mvm_get_active_dwell(mvm, band, params->n_ssids);
-       }
 
        IWL_DEBUG_SCAN(mvm,
                       "scan parameters: max_out_time %d, suspend_time %d, passive_fragmented %d\n",
                       params->max_out_time, params->suspend_time,
                       params->passive_fragmented);
-       IWL_DEBUG_SCAN(mvm,
-                      "dwell[IEEE80211_BAND_2GHZ]: passive %d, active %d, fragmented %d\n",
-                      params->dwell[IEEE80211_BAND_2GHZ].passive,
-                      params->dwell[IEEE80211_BAND_2GHZ].active,
-                      params->dwell[IEEE80211_BAND_2GHZ].fragmented);
-       IWL_DEBUG_SCAN(mvm,
-                      "dwell[IEEE80211_BAND_5GHZ]: passive %d, active %d, fragmented %d\n",
-                      params->dwell[IEEE80211_BAND_5GHZ].passive,
-                      params->dwell[IEEE80211_BAND_5GHZ].active,
-                      params->dwell[IEEE80211_BAND_5GHZ].fragmented);
 }
 
 static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
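
The dwell machinery collapses from a per-band table to three scalars: newer firmware derives the effective dwell itself, so the driver sends only basic values and drops the removed per-band, per-SSID-count math of iwl_mvm_get_active_dwell()/iwl_mvm_get_passive_dwell(). The P2P-device case likewise switches from a computed passive dwell to a fixed max_out_time of 120.

    /* before (removed above): dwell depended on band and SSID count,
     * e.g. 2.4 GHz active dwell = 20 + 3 * (n_ssids + 1).
     * after: fixed basics; the firmware computes the real dwell.
     */
    params->passive_dwell = 110;
    params->active_dwell = 10;
    if (params->passive_fragmented)
            params->fragmented_dwell = frag_passive_dwell;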
@@ -327,9 +283,8 @@ static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res,
        return buf;
 }
 
-int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+                                             struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
@@ -341,17 +296,13 @@ int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
                       iwl_mvm_dump_channel_list(notif->results,
                                                 notif->scanned_channels, buf,
                                                 sizeof(buf)));
-       return 0;
 }
 
-int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+                                struct iwl_rx_cmd_buffer *rxb)
 {
        IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
        ieee80211_sched_scan_results(mvm->hw);
-
-       return 0;
 }
 
 static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
@@ -368,9 +319,8 @@ static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
        }
 }
 
-int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
-                                       struct iwl_rx_cmd_buffer *rxb,
-                                       struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+                                        struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
@@ -426,8 +376,6 @@ int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
        mvm->last_ebs_successful =
                        scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
                        scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
-
-       return 0;
 }
 
 static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
@@ -751,11 +699,10 @@ static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
                                    struct iwl_scan_req_lmac *cmd,
                                    struct iwl_mvm_scan_params *params)
 {
-       cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
-       cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
+       cmd->active_dwell = params->active_dwell;
+       cmd->passive_dwell = params->passive_dwell;
        if (params->passive_fragmented)
-               cmd->fragmented_dwell =
-                               params->dwell[IEEE80211_BAND_2GHZ].fragmented;
+               cmd->fragmented_dwell = params->fragmented_dwell;
        cmd->max_out_time = cpu_to_le32(params->max_out_time);
        cmd->suspend_time = cpu_to_le32(params->suspend_time);
        cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
@@ -937,9 +884,9 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
        int num_channels =
                mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
                mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
-       int ret, i, j = 0, cmd_size, data_size;
+       int ret, i, j = 0, cmd_size;
        struct iwl_host_cmd cmd = {
-               .id = SCAN_CFG_CMD,
+               .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
        };
 
        if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
@@ -951,8 +898,6 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
        if (!scan_config)
                return -ENOMEM;
 
-       data_size = cmd_size - sizeof(struct iwl_mvm_umac_cmd_hdr);
-       scan_config->hdr.size = cpu_to_le16(data_size);
        scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
                                         SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
                                         SCAN_CONFIG_FLAG_SET_TX_CHAINS |
@@ -1013,11 +958,10 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
                                    struct iwl_scan_req_umac *cmd,
                                    struct iwl_mvm_scan_params *params)
 {
-       cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
-       cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
+       cmd->active_dwell = params->active_dwell;
+       cmd->passive_dwell = params->passive_dwell;
        if (params->passive_fragmented)
-               cmd->fragmented_dwell =
-                               params->dwell[IEEE80211_BAND_2GHZ].fragmented;
+               cmd->fragmented_dwell = params->fragmented_dwell;
        cmd->max_out_time = cpu_to_le32(params->max_out_time);
        cmd->suspend_time = cpu_to_le32(params->suspend_time);
        cmd->scan_priority =
@@ -1099,8 +1043,6 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                return uid;
 
        memset(cmd, 0, ksize(cmd));
-       cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
-                                   sizeof(struct iwl_mvm_umac_cmd_hdr));
 
        iwl_mvm_scan_umac_dwell(mvm, cmd, params);
 
@@ -1109,6 +1051,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        cmd->uid = cpu_to_le32(uid);
        cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));
 
+       if (type == IWL_MVM_SCAN_SCHED)
+               cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
+
        if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations))
                cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
                                     IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
@@ -1237,7 +1182,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
 
        if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
-               hcmd.id = SCAN_REQ_UMAC;
+               hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
                ret = iwl_mvm_scan_umac(mvm, vif, &params,
                                        IWL_MVM_SCAN_REGULAR);
        } else {
@@ -1345,7 +1290,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
        iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
 
        if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
-               hcmd.id = SCAN_REQ_UMAC;
+               hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
                ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED);
        } else {
                hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
@@ -1371,9 +1316,8 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
        return ret;
 }
 
-int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
-                                       struct iwl_rx_cmd_buffer *rxb,
-                                       struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+                                        struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_umac_scan_complete *notif = (void *)pkt->data;
@@ -1381,7 +1325,7 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
        bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
 
        if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
-               return 0;
+               return;
 
        /* if the scan is already stopping, we don't need to notify mac80211 */
        if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
@@ -1405,13 +1349,10 @@ int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
                mvm->last_ebs_successful = false;
 
        mvm->scan_uid_status[uid] = 0;
-
-       return 0;
 }
 
-int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+                                             struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
@@ -1423,15 +1364,11 @@ int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
                       iwl_mvm_dump_channel_list(notif->results,
                                                 notif->scanned_channels, buf,
                                                 sizeof(buf)));
-       return 0;
 }
 
 static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
 {
-       struct iwl_umac_scan_abort cmd = {
-               .hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) -
-                                       sizeof(struct iwl_mvm_umac_cmd_hdr)),
-       };
+       struct iwl_umac_scan_abort cmd = {};
        int uid, ret;
 
        lockdep_assert_held(&mvm->mutex);
@@ -1448,7 +1385,10 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
 
        IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm,
+                                  iwl_cmd_id(SCAN_ABORT_UMAC,
+                                             IWL_ALWAYS_LONG_GROUP, 0),
+                                  0, sizeof(cmd), &cmd);
        if (!ret)
                mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
 
@@ -1458,7 +1398,7 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
 static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
 {
        struct iwl_notification_wait wait_scan_done;
-       static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
+       static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
                                              SCAN_OFFLOAD_COMPLETE, };
        int ret;
 
index 3d2fbf1bc22656b9f68ff22dda7be43ba11862b6..df216cd0c98f4659d1c00d7d07c8adb31c8244c6 100644 (file)
@@ -1148,18 +1148,31 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
 {
-       int i;
+       int i, max = -1, max_offs = -1;
 
        lockdep_assert_held(&mvm->mutex);
 
-       i = find_first_zero_bit(mvm->fw_key_table, STA_KEY_MAX_NUM);
+       /* Pick the unused key offset with the highest 'deleted'
+        * counter. Every time a key is deleted, all the counters
+        * are incremented and the one that was just deleted is
+        * reset to zero. Thus, the highest counter is the one
+        * that was deleted longest ago. Pick that one.
+        */
+       for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+               if (test_bit(i, mvm->fw_key_table))
+                       continue;
+               if (mvm->fw_key_deleted[i] > max) {
+                       max = mvm->fw_key_deleted[i];
+                       max_offs = i;
+               }
+       }
 
-       if (i == STA_KEY_MAX_NUM)
+       if (max_offs < 0)
                return STA_KEY_IDX_INVALID;
 
-       __set_bit(i, mvm->fw_key_table);
+       __set_bit(max_offs, mvm->fw_key_table);
 
-       return i;
+       return max_offs;
 }
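
Concretely, with the counters maintained by the key-deletion hunk further down:

    /* worked example, STA_KEY_MAX_NUM == 4 for illustration:
     *
     *   fw_key_table   = {1, 0, 0, 1}   offsets 0 and 3 in use
     *   fw_key_deleted = {7, 2, 5, 0}
     *
     * Free offsets are 1 and 2; offset 2 has the larger counter, so it
     * was freed longest ago and wins. Presumably this keeps a just-
     * deleted key offset out of circulation as long as possible, so
     * frames still in flight against the old key are least likely to
     * hit a rewritten slot.
     */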
 
 static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
@@ -1399,6 +1412,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
        bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
        u8 sta_id;
        int ret;
+       static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -1465,7 +1479,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 end:
        IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
                      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
-                     sta->addr, ret);
+                     sta ? sta->addr : zero_addr, ret);
        return ret;
 }
 
@@ -1476,7 +1490,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
 {
        bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
        u8 sta_id;
-       int ret;
+       int ret, i;
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -1495,6 +1509,13 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
                return -ENOENT;
        }
 
+       /* track which key was deleted last */
+       for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+               if (mvm->fw_key_deleted[i] < U8_MAX)
+                       mvm->fw_key_deleted[i]++;
+       }
+       mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
+
        if (sta_id == IWL_MVM_STATION_COUNT) {
                IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
                return 0;
@@ -1658,9 +1679,8 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
 }
 
-int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
-                         struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+                          struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
@@ -1668,15 +1688,13 @@ int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
        u32 sta_id = le32_to_cpu(notif->sta_id);
 
        if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
-               return 0;
+               return;
 
        rcu_read_lock();
        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
        if (!IS_ERR_OR_NULL(sta))
                ieee80211_sta_eosp(sta);
        rcu_read_unlock();
-
-       return 0;
 }
 
 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
index 748f5dc3f9f4337952efc84fe93e5bfb89f97c81..eedb215eba3f6efd08f2ca387de80e46ec39bfb2 100644 (file)
@@ -378,9 +378,8 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
                             struct ieee80211_sta *sta, u32 iv32,
                             u16 *phase1key);
 
-int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
-                         struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+                          struct iwl_rx_cmd_buffer *rxb);
 
 /* AMPDU */
 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
index a87b506c8c7272393304824409ae8473d6022277..fe2fa5650443894a0534e4b8f76cb5ea09f7fef6 100644 (file)
@@ -169,18 +169,11 @@ static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                return;
 
        pkt = cmd.resp_pkt;
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               IWL_ERR(mvm, "Bad return from TDLS_CONFIG_COMMAND (0x%08X)\n",
-                       pkt->hdr.flags);
-               goto exit;
-       }
 
-       if (WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp)))
-               goto exit;
+       WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));
 
        /* we don't really care about the response at this point */
 
-exit:
        iwl_free_resp(&cmd);
 }
 
@@ -261,8 +254,7 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
                mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
 }
 
-int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
@@ -277,17 +269,17 @@ int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        /* can fail sometimes */
        if (!le32_to_cpu(notif->status)) {
                iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
-               goto out;
+               return;
        }
 
        if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
-               goto out;
+               return;
 
        sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                        lockdep_is_held(&mvm->mutex));
        /* the station may not be here, but if it is, it must be a TDLS peer */
        if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
-               goto out;
+               return;
 
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        vif = mvmsta->vif;
@@ -301,9 +293,6 @@ int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                         msecs_to_jiffies(delay));
 
        iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
-
-out:
-       return 0;
 }
 
 static int
@@ -471,13 +460,19 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
        cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
 
        info = IEEE80211_SKB_CB(skb);
-       if (info->control.hw_key)
-               iwl_mvm_set_tx_cmd_crypto(mvm, info, &cmd.frame.tx_cmd, skb);
+       hdr = (void *)skb->data;
+       if (info->control.hw_key) {
+               if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
+                       rcu_read_unlock();
+                       ret = -EINVAL;
+                       goto out;
+               }
+               iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd);
+       }
 
        iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
                           mvmsta->sta_id);
 
-       hdr = (void *)skb->data;
        iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
                                hdr->frame_control);
        rcu_read_unlock();
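
The channel-switch template now refuses non-CCMP protection outright with -EINVAL instead of going through the generic iwl_mvm_set_tx_cmd_crypto(); for CCMP the TX command is filled directly via iwl_mvm_set_tx_cmd_ccmp(). That helper lives elsewhere in the driver; reconstructed from its use here, it is approximately (treat as a sketch):

    static void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
                                        struct iwl_tx_cmd *tx_cmd)
    {
            struct ieee80211_key_conf *keyconf = info->control.hw_key;

            tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
            memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
    }

Note also that the hdr = (void *)skb->data assignment moves up so the header is available before the key check and the rate setup that follows.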
index d24b6a83e68cfcd4281301c907cb62a24661da96..dbd7d544575de68a3972588bb117a8fa5b560ef3 100644 (file)
@@ -86,7 +86,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
 {
        lockdep_assert_held(&mvm->time_event_lock);
 
-       if (te_data->id == TE_MAX)
+       if (!te_data->vif)
                return;
 
        list_del(&te_data->list);
@@ -410,9 +410,8 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
 /*
  * The Rx handler for time event notifications
  */
-int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+                                struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_time_event_notif *notif = (void *)pkt->data;
@@ -433,8 +432,6 @@ int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
        }
 unlock:
        spin_unlock_bh(&mvm->time_event_lock);
-
-       return 0;
 }
 
 static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
@@ -503,7 +500,7 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
                                       struct iwl_mvm_time_event_data *te_data,
                                       struct iwl_time_event_cmd *te_cmd)
 {
-       static const u8 time_event_response[] = { TIME_EVENT_CMD };
+       static const u16 time_event_response[] = { TIME_EVENT_CMD };
        struct iwl_notification_wait wait_time_event;
        int ret;
 
@@ -566,7 +563,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
-       const u8 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
+       const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
        struct iwl_notification_wait wait_te_notif;
        struct iwl_time_event_cmd time_cmd = {};
 
@@ -599,8 +596,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
        time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);
 
-       time_cmd.apply_time =
-               cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
+       time_cmd.apply_time = cpu_to_le32(0);
 
        time_cmd.max_frags = TE_V2_FRAG_NONE;
        time_cmd.max_delay = cpu_to_le32(max_delay);
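
Two behavioral notes on the time-event hunks: a te_data entry's validity is now keyed on te_data->vif rather than the id == TE_MAX sentinel, and session protection stops sampling DEVICE_SYSTEM_TIME_REG for its start time. The assumed firmware contract (not spelled out here) is that a zero apply_time means "schedule as soon as possible", which removes a read of the device clock:

    time_cmd.apply_time = cpu_to_le32(0);  /* 0 == ASAP per assumed fw API */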
index de4fbc6d57f150130e095fbec5471e9149d951bf..cbdf8e52a5f1cc705f57711a14265303b4b78c77 100644 (file)
@@ -157,9 +157,8 @@ void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
 /*
  * iwl_mvm_rx_time_event_notif - handles %TIME_EVENT_NOTIFICATION.
  */
-int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+                                struct iwl_rx_cmd_buffer *rxb);
 
 /**
  * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality
diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.c b/drivers/net/wireless/iwlwifi/mvm/tof.c
new file mode 100644 (file)
index 0000000..d060e12
--- /dev/null
@@ -0,0 +1,304 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "mvm.h"
+#include "fw-api-tof.h"
+
+#define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256
+
+void iwl_mvm_tof_init(struct iwl_mvm *mvm)
+{
+       struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
+
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+               return;
+
+       memset(tof_data, 0, sizeof(*tof_data));
+
+       tof_data->tof_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (IWL_MVM_TOF_IS_RESPONDER) {
+               tof_data->responder_cfg.sub_grp_cmd_id =
+                       cpu_to_le32(TOF_RESPONDER_CONFIG_CMD);
+               tof_data->responder_cfg.sta_id = IWL_MVM_STATION_COUNT;
+       }
+#endif
+
+       tof_data->range_req.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_CMD);
+       tof_data->range_req.req_timeout = 1;
+       tof_data->range_req.initiator = 1;
+       tof_data->range_req.report_policy = 3;
+
+       tof_data->range_req_ext.sub_grp_cmd_id =
+               cpu_to_le32(TOF_RANGE_REQ_EXT_CMD);
+
+       mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+}
+
+void iwl_mvm_tof_clean(struct iwl_mvm *mvm)
+{
+       struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
+
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+               return;
+
+       memset(tof_data, 0, sizeof(*tof_data));
+       mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+}
+
+static void iwl_tof_iterator(void *_data, u8 *mac,
+                            struct ieee80211_vif *vif)
+{
+       bool *enabled = _data;
+
+       /* a non-BSS vif exists */
+       if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
+               *enabled = false;
+}
+
+int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm)
+{
+       struct iwl_tof_config_cmd *cmd = &mvm->tof_data.tof_cfg;
+       bool enabled = true; /* cleared by the iterator on non-BSS vifs */
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+               return -EINVAL;
+
+       ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+                                                  IEEE80211_IFACE_ITER_NORMAL,
+                                                  iwl_tof_iterator, &enabled);
+       if (!enabled) {
+               IWL_DEBUG_INFO(mvm, "ToF is not supported (non-BSS vif)\n");
+               return -EINVAL;
+       }
+
+       mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+       return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+                                                   IWL_ALWAYS_LONG_GROUP, 0),
+                                   0, sizeof(*cmd), cmd);
+}
+
+int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id)
+{
+       struct iwl_tof_range_abort_cmd cmd = {
+               .sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_ABORT_CMD),
+               .request_id = id,
+       };
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+               return -EINVAL;
+
+       if (id != mvm->tof_data.active_range_request) {
+               IWL_ERR(mvm, "Invalid range request id %d (active %d)\n",
+                       id, mvm->tof_data.active_range_request);
+               return -EINVAL;
+       }
+
+       /* after abort is sent there's no active request anymore */
+       mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+
+       return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+                                                   IWL_ALWAYS_LONG_GROUP, 0),
+                                   0, sizeof(cmd), &cmd);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
+                             struct ieee80211_vif *vif)
+{
+       struct iwl_tof_responder_config_cmd *cmd = &mvm->tof_data.responder_cfg;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+               return -EINVAL;
+
+       if (vif->p2p || vif->type != NL80211_IFTYPE_AP) {
+               IWL_ERR(mvm, "Cannot start responder, not in AP mode\n");
+               return -EIO;
+       }
+
+       cmd->sta_id = mvmvif->bcast_sta.sta_id;
+       return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+                                                   IWL_ALWAYS_LONG_GROUP, 0),
+                                   0, sizeof(*cmd), cmd);
+}
+#endif
+
+int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
+                                 struct ieee80211_vif *vif)
+{
+       struct iwl_host_cmd cmd = {
+               .id = TOF_CMD,
+               .len = { sizeof(mvm->tof_data.range_req), },
+               /* no copy because of the command size */
+               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+       };
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+               return -EINVAL;
+
+       if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+               IWL_ERR(mvm, "Cannot send range request, not in STA mode\n");
+               return -EIO;
+       }
+
+       /* nesting of range requests is not supported in FW */
+       if (mvm->tof_data.active_range_request !=
+               IWL_MVM_TOF_RANGE_REQ_MAX_ID) {
+               IWL_ERR(mvm, "Cannot send range req, already active req %d\n",
+                       mvm->tof_data.active_range_request);
+               return -EIO;
+       }
+
+       mvm->tof_data.active_range_request = mvm->tof_data.range_req.request_id;
+
+       cmd.data[0] = &mvm->tof_data.range_req;
+       return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
+int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
+                                     struct ieee80211_vif *vif)
+{
+       lockdep_assert_held(&mvm->mutex);
+
+       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+               return -EINVAL;
+
+       if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+               IWL_ERR(mvm, "Cannot send ext range req, not in STA mode\n");
+               return -EIO;
+       }
+
+       return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+                                                   IWL_ALWAYS_LONG_GROUP, 0),
+                                   0, sizeof(mvm->tof_data.range_req_ext),
+                                   &mvm->tof_data.range_req_ext);
+}
+
+static int iwl_mvm_tof_range_resp(struct iwl_mvm *mvm, void *data)
+{
+       struct iwl_tof_range_rsp_ntfy *resp = (void *)data;
+
+       if (resp->request_id != mvm->tof_data.active_range_request) {
+               IWL_ERR(mvm, "Request id mismatch, got %d, active %d\n",
+                       resp->request_id, mvm->tof_data.active_range_request);
+               return -EIO;
+       }
+
+       memcpy(&mvm->tof_data.range_resp, resp,
+              sizeof(struct iwl_tof_range_rsp_ntfy));
+       mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+
+       return 0;
+}
+
+static int iwl_mvm_tof_mcsi_notif(struct iwl_mvm *mvm, void *data)
+{
+       struct iwl_tof_mcsi_notif *resp = (struct iwl_tof_mcsi_notif *)data;
+
+       IWL_DEBUG_INFO(mvm, "MCSI notification, token %d\n", resp->token);
+       return 0;
+}
+
+static int iwl_mvm_tof_nb_report_notif(struct iwl_mvm *mvm, void *data)
+{
+       struct iwl_tof_neighbor_report *report =
+               (struct iwl_tof_neighbor_report *)data;
+
+       IWL_DEBUG_INFO(mvm, "NB report, bssid %pM, token %d, status 0x%x\n",
+                      report->bssid, report->request_token, report->status);
+       return 0;
+}
+
+void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
+                             struct iwl_rx_cmd_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_tof_gen_resp_cmd *resp = (void *)pkt->data;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       switch (le32_to_cpu(resp->sub_grp_cmd_id)) {
+       case TOF_RANGE_RESPONSE_NOTIF:
+               iwl_mvm_tof_range_resp(mvm, resp->data);
+               break;
+       case TOF_MCSI_DEBUG_NOTIF:
+               iwl_mvm_tof_mcsi_notif(mvm, resp->data);
+               break;
+       case TOF_NEIGHBOR_REPORT_RSP_NOTIF:
+               iwl_mvm_tof_nb_report_notif(mvm, resp->data);
+               break;
+       default:
+               IWL_ERR(mvm, "Unknown sub-group command 0x%x\n",
+                       resp->sub_grp_cmd_id);
+               break;
+       }
+}
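
tof.c wires the new Time-of-Flight (FTM ranging) sub-commands through the single TOF_CMD host command, and iwl_mvm_tof_resp_handler() demultiplexes the firmware's answers by sub_grp_cmd_id. A hypothetical caller sketch showing the intended single-request flow (tof_measure_once() is not part of the patch):

static int tof_measure_once(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
        int ret;

        mutex_lock(&mvm->mutex);
        ret = iwl_mvm_tof_config_cmd(mvm);      /* -EINVAL on non-BSS vifs */
        if (!ret)
                /* marks the request active; nesting is rejected with -EIO */
                ret = iwl_mvm_tof_range_request_cmd(mvm, vif);
        mutex_unlock(&mvm->mutex);

        /* TOF_RANGE_RESPONSE_NOTIF later clears active_range_request */
        return ret;
}
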
diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.h b/drivers/net/wireless/iwlwifi/mvm/tof.h
new file mode 100644 (file)
index 0000000..50ae8ad
--- /dev/null
@@ -0,0 +1,94 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __tof_h__
+#define __tof_h__
+
+#include "fw-api-tof.h"
+
+struct iwl_mvm_tof_data {
+       struct iwl_tof_config_cmd tof_cfg;
+       struct iwl_tof_range_req_cmd range_req;
+       struct iwl_tof_range_req_ext_cmd range_req_ext;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       struct iwl_tof_responder_config_cmd responder_cfg;
+#endif
+       struct iwl_tof_range_rsp_ntfy range_resp;
+       u8 last_abort_id;
+       u16 active_range_request;
+};
+
+void iwl_mvm_tof_init(struct iwl_mvm *mvm);
+void iwl_mvm_tof_clean(struct iwl_mvm *mvm);
+int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm);
+int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id);
+int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
+                                 struct ieee80211_vif *vif);
+void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
+                             struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
+                                     struct ieee80211_vif *vif);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
+                             struct ieee80211_vif *vif);
+#endif
+#endif /* __tof_h__ */
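
Every sender in tof.c builds its ID as iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0), and iwl_mvm_send_cmd_pdu() is widened from u8 to u32 IDs later in this diff to make room for it. The helper itself lives outside this hunk, so treat the packing below as an assumption for illustration: group in the second byte, opcode in the first, version reserved.

/* assumed wide-ID layout; not the driver's literal definition */
static inline u32 iwl_cmd_id_sketch(u8 opcode, u8 groupid, u8 version)
{
        return ((u32)groupid << 8) | opcode;    /* version unused for now */
}
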
index 80d07db6e7e8c80f472ee84fe6c31fc0f53844b1..fe7145c2c98acaa47bb19fe9dbbe4145f07491bd 100644 (file)
@@ -33,6 +33,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -154,24 +155,20 @@ static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait,
        return true;
 }
 
-int iwl_mvm_temp_notif(struct iwl_mvm *mvm,
-                      struct iwl_rx_cmd_buffer *rxb,
-                      struct iwl_device_cmd *cmd)
+void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        int temp;
 
        /* the notification is handled synchronously in ctkill, so skip here */
        if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
-               return 0;
+               return;
 
        temp = iwl_mvm_temp_notif_parse(mvm, pkt);
        if (temp < 0)
-               return 0;
+               return;
 
        iwl_mvm_tt_temp_changed(mvm, temp);
-
-       return 0;
 }
 
 static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
@@ -187,7 +184,7 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
 int iwl_mvm_get_temp(struct iwl_mvm *mvm)
 {
        struct iwl_notification_wait wait_temp_notif;
-       static const u8 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION };
+       static const u16 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION };
        int ret, temp;
 
        lockdep_assert_held(&mvm->mutex);
index 7ba7a118ff5ca28615f5ceeb745f577999ea52dc..15bf36ad3809d363d58290392e2bbcbcbaa540f7 100644 (file)
@@ -252,7 +252,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
 
        if (info->band == IEEE80211_BAND_2GHZ &&
            !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
-               rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS;
+               rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
        else
                rate_flags =
                        BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
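
The one-character fix above is easy to misread. Assuming cfg->non_shared_ant already holds an antenna bitmask (ANT_A == BIT(0) and so on) while mgmt_last_antenna_idx holds an index, wrapping the former in BIT() shifted the mask a second time and selected the wrong antenna:

/* mask vs. index, under the assumption stated above */
rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;        /* already a mask */
rate_flags = BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; /* index, needs BIT() */
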
@@ -268,19 +268,29 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
 /*
  * Sets the fields in the Tx cmd that are crypto related
  */
-void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
-                              struct ieee80211_tx_info *info,
-                              struct iwl_tx_cmd *tx_cmd,
-                              struct sk_buff *skb_frag)
+static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
+                                     struct ieee80211_tx_info *info,
+                                     struct iwl_tx_cmd *tx_cmd,
+                                     struct sk_buff *skb_frag,
+                                     int hdrlen)
 {
        struct ieee80211_key_conf *keyconf = info->control.hw_key;
+       u8 *crypto_hdr = skb_frag->data + hdrlen;
+       u64 pn;
 
        switch (keyconf->cipher) {
        case WLAN_CIPHER_SUITE_CCMP:
-               tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
-               memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
-               if (info->flags & IEEE80211_TX_CTL_AMPDU)
-                       tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
+       case WLAN_CIPHER_SUITE_CCMP_256:
+               iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
+               pn = atomic64_inc_return(&keyconf->tx_pn);
+               crypto_hdr[0] = pn;
+               crypto_hdr[2] = 0;
+               crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
+               crypto_hdr[1] = pn >> 8;
+               crypto_hdr[4] = pn >> 16;
+               crypto_hdr[5] = pn >> 24;
+               crypto_hdr[6] = pn >> 32;
+               crypto_hdr[7] = pn >> 40;
                break;
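
The CCMP hunk above now writes the packet number into the frame itself instead of leaving it to the firmware, hence the new hdrlen parameter. The scattered byte stores follow the IEEE 802.11 CCMP header layout; a compact equivalent sketch:

/* 8-byte CCMP header: PN0, PN1, reserved, ExtIV|keyid, PN2..PN5 */
static void ccmp_hdr_fill_sketch(u8 *crypto_hdr, u64 pn, u8 keyidx)
{
        crypto_hdr[0] = pn;                     /* PN0 */
        crypto_hdr[1] = pn >> 8;                /* PN1 */
        crypto_hdr[2] = 0;                      /* reserved */
        crypto_hdr[3] = 0x20 | (keyidx << 6);   /* ExtIV bit | key index */
        crypto_hdr[4] = pn >> 16;               /* PN2 */
        crypto_hdr[5] = pn >> 24;               /* PN3 */
        crypto_hdr[6] = pn >> 32;               /* PN4 */
        crypto_hdr[7] = pn >> 40;               /* PN5 */
}
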
 
        case WLAN_CIPHER_SUITE_TKIP:
@@ -308,7 +318,7 @@ void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
  */
 static struct iwl_device_cmd *
 iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
-                     struct ieee80211_sta *sta, u8 sta_id)
+                     int hdrlen, struct ieee80211_sta *sta, u8 sta_id)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -325,7 +335,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
        tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 
        if (info->control.hw_key)
-               iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb);
+               iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);
 
        iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);
 
@@ -346,6 +356,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
        struct iwl_device_cmd *dev_cmd;
        struct iwl_tx_cmd *tx_cmd;
        u8 sta_id;
+       int hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
        if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU))
                return -1;
@@ -366,23 +377,34 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
                IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
 
        /*
-        * If the interface on which frame is sent is the P2P_DEVICE
+        * If the interface on which the frame is sent is the P2P_DEVICE
         * or an AP/GO interface use the broadcast station associated
-        * with it; otherwise use the AUX station.
+        * with it; otherwise if the interface is a managed interface
+        * use the AP station associated with it for multicast traffic
+        * (this is not possible for unicast packets, as e.g. a TDLS discovery
+        * response is sent without a station entry); otherwise use the
+        * AUX station.
         */
-       if (info->control.vif &&
-           (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
-            info->control.vif->type == NL80211_IFTYPE_AP)) {
+       sta_id = mvm->aux_sta.sta_id;
+       if (info->control.vif) {
                struct iwl_mvm_vif *mvmvif =
                        iwl_mvm_vif_from_mac80211(info->control.vif);
-               sta_id = mvmvif->bcast_sta.sta_id;
-       } else {
-               sta_id = mvm->aux_sta.sta_id;
+
+               if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+                   info->control.vif->type == NL80211_IFTYPE_AP)
+                       sta_id = mvmvif->bcast_sta.sta_id;
+               else if (info->control.vif->type == NL80211_IFTYPE_STATION &&
+                        is_multicast_ether_addr(hdr->addr1)) {
+                       u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
+
+                       if (ap_sta_id != IWL_MVM_STATION_COUNT)
+                               sta_id = ap_sta_id;
+               }
        }
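
Condensing the station choice above into one helper makes the new precedence easier to see; pick_sta_id_sketch() is illustrative only, and the ACCESS_ONCE() read of ap_sta_id is elided for brevity:

static u8 pick_sta_id_sketch(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                             struct ieee80211_hdr *hdr)
{
        struct iwl_mvm_vif *mvmvif;

        if (!vif)
                return mvm->aux_sta.sta_id;             /* no vif: AUX */
        mvmvif = iwl_mvm_vif_from_mac80211(vif);
        if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
            vif->type == NL80211_IFTYPE_AP)
                return mvmvif->bcast_sta.sta_id;        /* broadcast sta */
        if (vif->type == NL80211_IFTYPE_STATION &&
            is_multicast_ether_addr(hdr->addr1) &&
            mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT)
                return mvmvif->ap_sta_id;               /* mcast via AP sta */
        return mvm->aux_sta.sta_id;                     /* fallback: AUX */
}
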
 
        IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue);
 
-       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, NULL, sta_id);
+       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id);
        if (!dev_cmd)
                return -1;
 
@@ -390,7 +412,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
        tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 
        /* Copy MAC header from skb into command buffer */
-       memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control));
+       memcpy(tx_cmd->hdr, hdr, hdrlen);
 
        if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) {
                iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
@@ -416,9 +438,11 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
        u8 tid = IWL_MAX_TID_COUNT;
        u8 txq_id = info->hw_queue;
        bool is_data_qos = false, is_ampdu = false;
+       int hdrlen;
 
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        fc = hdr->frame_control;
+       hdrlen = ieee80211_hdrlen(fc);
 
        if (WARN_ON_ONCE(!mvmsta))
                return -1;
@@ -426,7 +450,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
        if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
                return -1;
 
-       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, sta, mvmsta->sta_id);
+       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id);
        if (!dev_cmd)
                goto drop;
 
@@ -458,7 +482,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
        }
 
        /* Copy MAC header from skb into command buffer */
-       memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(fc));
+       memcpy(tx_cmd->hdr, hdr, hdrlen);
 
        WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
 
@@ -911,8 +935,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
        rcu_read_unlock();
 }
 
-int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                     struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
@@ -921,8 +944,6 @@ int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                iwl_mvm_rx_tx_cmd_single(mvm, pkt);
        else
                iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
-
-       return 0;
 }
 
 static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
@@ -942,8 +963,7 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
                (void *)(uintptr_t)tid_data->rate_n_flags;
 }
 
-int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                       struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
@@ -965,7 +985,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
                      tid >= IWL_MAX_TID_COUNT,
                      "sta_id %d tid %d", sta_id, tid))
-               return 0;
+               return;
 
        rcu_read_lock();
 
@@ -974,7 +994,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        /* Reclaiming frames for a station that has been deleted ? */
        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
                rcu_read_unlock();
-               return 0;
+               return;
        }
 
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -985,7 +1005,7 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                        "invalid BA notification: Q %d, tid %d, flow %d\n",
                        tid_data->txq_id, tid, scd_flow);
                rcu_read_unlock();
-               return 0;
+               return;
        }
 
        spin_lock_bh(&mvmsta->lock);
@@ -1072,8 +1092,6 @@ out:
                skb = __skb_dequeue(&reclaimed_skbs);
                ieee80211_tx_status(mvm->hw, skb);
        }
-
-       return 0;
 }
 
 /*
index 03f8e06dded72fc74a302c7e52fced302631dc01..a7d434256423382af219c1ab9221efdfe686ea69 100644 (file)
@@ -108,7 +108,7 @@ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
        return ret;
 }
 
-int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
+int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
                         u32 flags, u16 len, const void *data)
 {
        struct iwl_host_cmd cmd = {
@@ -166,11 +166,6 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
                goto out_free_resp;
        }
 
-       if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-               ret = -EIO;
-               goto out_free_resp;
-       }
-
        resp_len = iwl_rx_packet_payload_len(pkt);
        if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
                ret = -EIO;
@@ -187,7 +182,7 @@ int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
 /*
 * We assume that the caller set the status to the success value
  */
-int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id, u16 len,
+int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
                                const void *data, u32 *status)
 {
        struct iwl_host_cmd cmd = {
@@ -243,8 +238,7 @@ u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx)
        return fw_rate_idx_to_plcp[rate_idx];
 }
 
-int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_error_resp *err_resp = (void *)pkt->data;
@@ -256,7 +250,6 @@ int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                le32_to_cpu(err_resp->error_service));
        IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n",
                le64_to_cpu(err_resp->timestamp));
-       return 0;
 }
 
 /*
index 2ed1e4d2774da83f1cd609983c8383c48d37a128..b0825c402c732c0514637b3b21b26288a7275444 100644 (file)
@@ -368,12 +368,14 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 /* 3165 Series */
        {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},
 
 /* 7265 Series */
        {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
@@ -426,9 +428,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
@@ -613,6 +614,7 @@ static int iwl_pci_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct iwl_trans *trans = pci_get_drvdata(pdev);
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill;
 
        /* Before you put code here, think about WoWLAN. You cannot check here
@@ -630,20 +632,16 @@ static int iwl_pci_resume(struct device *device)
                return 0;
 
        /*
-        * On suspend, ict is disabled, and the interrupt mask
-        * gets cleared. Reconfigure them both in case of d0i3
-        * image. Otherwise, only enable rfkill interrupt (in
-        * order to keep track of the rfkill status)
+        * Enable rfkill interrupt (in order to keep track of
+        * the rfkill status)
         */
-       if (trans->wowlan_d0i3) {
-               iwl_pcie_reset_ict(trans);
-               iwl_enable_interrupts(trans);
-       } else {
-               iwl_enable_rfkill_int(trans);
-       }
+       iwl_enable_rfkill_int(trans);
 
        hw_rfkill = iwl_is_rfkill_set(trans);
+
+       mutex_lock(&trans_pcie->mutex);
        iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+       mutex_unlock(&trans_pcie->mutex);
 
        return 0;
 }
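
The resume path now reports rf-kill under trans_pcie->mutex, matching the @mutex kernel-doc added to internal.h below; presumably iwl_trans_pcie_rf_kill() can end up in stop_device, so it must be serialized with the start/stop paths. The rule in isolation, as a sketch with the names used in this diff:

/* rf-kill reporting may tear the device down, so take the same mutex
 * that guards stop_device / start_fw / start_hw */
mutex_lock(&trans_pcie->mutex);
iwl_trans_pcie_rf_kill(trans, iwl_is_rfkill_set(trans));
mutex_unlock(&trans_pcie->mutex);
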
index 31f72a61cc3fe06b6d9189f718cc3315677625ad..4f872f05d988f48fa5e5f6c66d650bdc0fdf47ca 100644 (file)
 #include "iwl-io.h"
 #include "iwl-op-mode.h"
 
-/*
- * RX related structures and functions
- */
-#define RX_NUM_QUEUES 1
-#define RX_POST_REQ_ALLOC 2
-#define RX_CLAIM_REQ_ALLOC 8
-#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
-#define RX_LOW_WATERMARK 8
-
 struct iwl_host_cmd;
 
 /* This file includes the declarations that are internal to the
@@ -86,29 +77,29 @@ struct isr_statistics {
  * struct iwl_rxq - Rx queue
  * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @pool: initial pool of iwl_rx_mem_buffer for the queue
+ * @queue: actual rx queue
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
  * @free_count: Number of pre-allocated buffers in rx_free
- * @used_count: Number of RBDs handed to allocator to use for allocation
 * @write_actual: last write index actually written to the device (aligned to 8)
- * @rx_free: list of RBDs with allocated RB ready for use
- * @rx_used: list of RBDs with no RB attached
+ * @rx_free: list of free SKBs for use
+ * @rx_used: list of Rx buffers with no SKB
  * @need_update: flag to indicate we need to update read/write index
  * @rb_stts: driver's pointer to receive buffer status
  * @rb_stts_dma: bus address of receive buffer status
  * @lock:
- * @pool: initial pool of iwl_rx_mem_buffer for the queue
- * @queue: actual rx queue
  *
  * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
  */
 struct iwl_rxq {
        __le32 *bd;
        dma_addr_t bd_dma;
+       struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+       struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
        u32 read;
        u32 write;
        u32 free_count;
-       u32 used_count;
        u32 write_actual;
        struct list_head rx_free;
        struct list_head rx_used;
@@ -116,32 +107,6 @@ struct iwl_rxq {
        struct iwl_rb_status *rb_stts;
        dma_addr_t rb_stts_dma;
        spinlock_t lock;
-       struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
-       struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
-};
-
-/**
- * struct iwl_rb_allocator - Rx allocator
- * @pool: initial pool of allocator
- * @req_pending: number of requests the allocator had not processed yet
- * @req_ready: number of requests honored and ready for claiming
- * @rbd_allocated: RBDs with pages allocated and ready to be handed to
- *     the queue. This is a list of &struct iwl_rx_mem_buffer
- * @rbd_empty: RBDs with no page attached for allocator use. This is a list
- *     of &struct iwl_rx_mem_buffer
- * @lock: protects the rbd_allocated and rbd_empty lists
- * @alloc_wq: work queue for background calls
- * @rx_alloc: work struct for background calls
- */
-struct iwl_rb_allocator {
-       struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
-       atomic_t req_pending;
-       atomic_t req_ready;
-       struct list_head rbd_allocated;
-       struct list_head rbd_empty;
-       spinlock_t lock;
-       struct workqueue_struct *alloc_wq;
-       struct work_struct rx_alloc;
 };
 
 struct iwl_dma_ptr {
@@ -285,7 +250,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
 /**
  * struct iwl_trans_pcie - PCIe transport specific data
  * @rxq: all the RX queue data
- * @rba: allocator for RX replenishing
+ * @rx_replenish: work that will be called when buffers need to be allocated
  * @drv - pointer to iwl_drv
  * @trans: pointer to the generic transport area
  * @scd_base_addr: scheduler sram base address in SRAM
@@ -299,8 +264,10 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
  * @rx_buf_size_8k: 8 kB RX buffer size
  * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
  * @scd_set_active: should the transport configure the SCD for HCMD queue
+ * @wide_cmd_header: true when ucode supports wide command header format
  * @rx_page_order: page order for receive buffer size
  * @reg_lock: protect hw register access
+ * @mutex: to protect stop_device / start_fw / start_hw
  * @cmd_in_flight: true when we have a host command in flight
  * @fw_mon_phys: physical address of the buffer for the firmware monitor
  * @fw_mon_page: points to the first page of the buffer for the firmware monitor
@@ -308,7 +275,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
  */
 struct iwl_trans_pcie {
        struct iwl_rxq rxq;
-       struct iwl_rb_allocator rba;
+       struct work_struct rx_replenish;
        struct iwl_trans *trans;
        struct iwl_drv *drv;
 
@@ -320,9 +287,11 @@ struct iwl_trans_pcie {
        dma_addr_t ict_tbl_dma;
        int ict_index;
        bool use_ict;
+       bool is_down;
        struct isr_statistics isr_stats;
 
        spinlock_t irq_lock;
+       struct mutex mutex;
        u32 inta_mask;
        u32 scd_base_addr;
        struct iwl_dma_ptr scd_bc_tbls;
@@ -349,6 +318,7 @@ struct iwl_trans_pcie {
        bool rx_buf_size_8k;
        bool bc_table_dword;
        bool scd_set_active;
+       bool wide_cmd_header;
        u32 rx_page_order;
 
        const char *const *command_names;
@@ -420,7 +390,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
-                           struct iwl_rx_cmd_buffer *rxb, int handler_status);
+                           struct iwl_rx_cmd_buffer *rxb);
 void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
                            struct sk_buff_head *skbs);
 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
index a3fbaa0ef5e04de7d1032c79ca35e82364dac7ea..e1af0fffedd818b0e16a6e72fbce9f9445ca8bce 100644 (file)
@@ -1,7 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
  * resets the Rx queue buffers with new memory.
  *
  * The management in the driver is as follows:
- * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
- *   When the interrupt handler is called, the request is processed.
- *   The page is either stolen - transferred to the upper layer
- *   or reused - added immediately to the iwl->rxq->rx_free list.
- * + When the page is stolen - the driver updates the matching queue's used
- *   count, detaches the RBD and transfers it to the queue used list.
- *   When there are two used RBDs - they are transferred to the allocator empty
- *   list. Work is then scheduled for the allocator to start allocating
- *   eight buffers.
- *   When there are another 6 used RBDs - they are transferred to the allocator
- *   empty list and the driver tries to claim the pre-allocated buffers and
- *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
- *   until ready.
- *   When there are 8+ buffers in the free list - either from allocation or from
- *   8 reused unstolen pages - restock is called to update the FW and indexes.
- * + In order to make sure the allocator always has RBDs to use for allocation
- *   the allocator has an initial pool sized num_queues*(8-2), the maximum
- *   number of missing RBDs per allocation request (a request is posted with
- *   2 empty RBDs; there is no guarantee when the other 6 RBDs are supplied).
- *   The queue supplies the recycling of the rest of the RBDs.
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
+ *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ *   to replenish the iwl->rxq->rx_free.
+ * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
+ *   iwl->rxq is replenished and the READ INDEX is updated (updating the
+ *   'processed' and 'read' driver indexes as well)
  * + A received packet is processed and handed to the kernel network stack,
  *   detached from the iwl->rxq.  The driver 'processed' index is updated.
- * + If there are no allocated buffers in iwl->rxq->rx_free,
+ * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
+ *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
  *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
  *   If there were enough free buffers and RX_STALLED is set it is cleared.
  *
  *
  * iwl_rxq_alloc()            Allocates rx_free
  * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
- *                            iwl_pcie_rxq_restock.
- *                            Used only during initialization.
+ *                            iwl_pcie_rxq_restock
  * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
  *                            queue, updates firmware pointers, and updates
- *                            the WRITE index.
- * iwl_pcie_rx_allocator()     Background work for allocating pages.
+ *                            the WRITE index.  If insufficient rx_free buffers
+ *                            are available, schedules iwl_pcie_rx_replenish
  *
  * -- enable interrupts --
  * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
  *                            READ INDEX, detaching the SKB from the pool.
  *                            Moves the packet buffer from queue to rx_used.
- *                            Posts and claims requests to the allocator.
  *                            Calls iwl_pcie_rxq_restock to refill any empty
  *                            slots.
- *
- * RBD life-cycle:
- *
- * Init:
- * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
- *
- * Regular Receive interrupt:
- * Page Stolen:
- * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
- * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
- * Page not Stolen:
- * rxq.queue -> rxq.rx_free -> rxq.queue
  * ...
  *
  */
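
This comment rewrite reverts rx.c from the background RB allocator back to the single-pool, watermark-driven scheme. The whole policy reduces to one trigger plus one worker; a sketch using the names restored elsewhere in this diff:

/* hot path (restock): hand rx_free buffers to the HW, then check the
 * watermark and kick the refill worker if the pool is running dry */
if (rxq->free_count <= RX_LOW_WATERMARK)
        schedule_work(&trans_pcie->rx_replenish);   /* GFP_KERNEL refill */
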
@@ -267,6 +240,10 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
                rxq->free_count--;
        }
        spin_unlock(&rxq->lock);
+       /* If the pre-allocated buffer pool is dropping low, schedule to
+        * refill it */
+       if (rxq->free_count <= RX_LOW_WATERMARK)
+               schedule_work(&trans_pcie->rx_replenish);
 
        /* If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8. */
@@ -277,44 +254,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
        }
 }
 
-/*
- * iwl_pcie_rx_alloc_page - allocates and returns a page.
- *
- */
-static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
-{
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
-       struct page *page;
-       gfp_t gfp_mask = GFP_KERNEL;
-
-       if (rxq->free_count > RX_LOW_WATERMARK)
-               gfp_mask |= __GFP_NOWARN;
-
-       if (trans_pcie->rx_page_order > 0)
-               gfp_mask |= __GFP_COMP;
-
-       /* Alloc a new receive buffer */
-       page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
-       if (!page) {
-               if (net_ratelimit())
-                       IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
-                                      trans_pcie->rx_page_order);
-               /* Issue an error if the hardware has consumed more than half
-                * of its free buffer list and we don't have enough
-                * pre-allocated buffers.
-                */
-               if (rxq->free_count <= RX_LOW_WATERMARK &&
-                   iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
-                   net_ratelimit())
-                       IWL_CRIT(trans,
-                                "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
-                                rxq->free_count);
-               return NULL;
-       }
-       return page;
-}
-
 /*
  * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
  *
@@ -324,12 +263,13 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
  * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
  * allocated buffers.
  */
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
+static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rx_mem_buffer *rxb;
        struct page *page;
+       gfp_t gfp_mask = priority;
 
        while (1) {
                spin_lock(&rxq->lock);
@@ -339,10 +279,32 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
                }
                spin_unlock(&rxq->lock);
 
+               if (rxq->free_count > RX_LOW_WATERMARK)
+                       gfp_mask |= __GFP_NOWARN;
+
+               if (trans_pcie->rx_page_order > 0)
+                       gfp_mask |= __GFP_COMP;
+
                /* Alloc a new receive buffer */
-               page = iwl_pcie_rx_alloc_page(trans);
-               if (!page)
+               page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+               if (!page) {
+                       if (net_ratelimit())
+                               IWL_DEBUG_INFO(trans, "alloc_pages failed, "
+                                          "order: %d\n",
+                                          trans_pcie->rx_page_order);
+
+                       if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+                           net_ratelimit())
+                               IWL_CRIT(trans, "Failed to alloc_pages with %s. "
+                                        "Only %u free buffers remaining.\n",
+                                        priority == GFP_ATOMIC ?
+                                        "GFP_ATOMIC" : "GFP_KERNEL",
+                                        rxq->free_count);
+                       /* We don't reschedule replenish work here -- we will
+                        * call the restock method and if it still needs
+                        * more buffers it will schedule replenish */
                        return;
+               }
 
                spin_lock(&rxq->lock);
 
@@ -393,7 +355,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
 
        lockdep_assert_held(&rxq->lock);
 
-       for (i = 0; i < RX_QUEUE_SIZE; i++) {
+       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
                if (!rxq->pool[i].page)
                        continue;
                dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
@@ -410,144 +372,32 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
 * When moving to rx_free a page is allocated for the slot.
  *
  * Also restock the Rx queue via iwl_pcie_rxq_restock.
- * This is called only during initialization
+ * This is called as a scheduled work item (except during initialization)
  */
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
 {
-       iwl_pcie_rxq_alloc_rbs(trans);
+       iwl_pcie_rxq_alloc_rbs(trans, gfp);
 
        iwl_pcie_rxq_restock(trans);
 }
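
iwl_pcie_rxq_alloc_rbs() now takes its allocation priority from the caller (GFP_KERNEL from the replenish worker, GFP_ATOMIC per the error string in the hunk above) and decorates it according to buffer state; the flag policy in isolation, as restored by this patch:

gfp_t gfp_mask = priority;              /* GFP_KERNEL or GFP_ATOMIC */

if (rxq->free_count > RX_LOW_WATERMARK)
        gfp_mask |= __GFP_NOWARN;       /* plenty left: failure is benign */
if (trans_pcie->rx_page_order > 0)
        gfp_mask |= __GFP_COMP;         /* multi-page RBs need compound pages */
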
 
-/*
- * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
- *
- * Allocates 8 pages for each received request
- * Called as a scheduled work item.
- */
-static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish_work(struct work_struct *data)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rb_allocator *rba = &trans_pcie->rba;
-
-       while (atomic_read(&rba->req_pending)) {
-               int i;
-               struct list_head local_empty;
-               struct list_head local_allocated;
-
-               INIT_LIST_HEAD(&local_allocated);
-               spin_lock(&rba->lock);
-               /* swap out the entire rba->rbd_empty to a local list */
-               list_replace_init(&rba->rbd_empty, &local_empty);
-               spin_unlock(&rba->lock);
-
-               for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
-                       struct iwl_rx_mem_buffer *rxb;
-                       struct page *page;
-
-                       /* List should never be empty - each reused RBD is
-                        * returned to the list, and initial pool covers any
-                        * possible gap between the time the page is allocated
-                        * to the time the RBD is added.
-                        */
-                       BUG_ON(list_empty(&local_empty));
-                       /* Get the first rxb from the rbd list */
-                       rxb = list_first_entry(&local_empty,
-                                              struct iwl_rx_mem_buffer, list);
-                       BUG_ON(rxb->page);
-
-                       /* Alloc a new receive buffer */
-                       page = iwl_pcie_rx_alloc_page(trans);
-                       if (!page)
-                               continue;
-                       rxb->page = page;
-
-                       /* Get physical address of the RB */
-                       rxb->page_dma = dma_map_page(trans->dev, page, 0,
-                                       PAGE_SIZE << trans_pcie->rx_page_order,
-                                       DMA_FROM_DEVICE);
-                       if (dma_mapping_error(trans->dev, rxb->page_dma)) {
-                               rxb->page = NULL;
-                               __free_pages(page, trans_pcie->rx_page_order);
-                               continue;
-                       }
-                       /* dma address must be no more than 36 bits */
-                       BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
-                       /* and also 256 byte aligned! */
-                       BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
-                       /* move the allocated entry to the out list */
-                       list_move(&rxb->list, &local_allocated);
-                       i++;
-               }
-
-               spin_lock(&rba->lock);
-               /* add the allocated rbds to the allocator allocated list */
-               list_splice_tail(&local_allocated, &rba->rbd_allocated);
-               /* add the unused rbds back to the allocator empty list */
-               list_splice_tail(&local_empty, &rba->rbd_empty);
-               spin_unlock(&rba->lock);
-
-               atomic_dec(&rba->req_pending);
-               atomic_inc(&rba->req_ready);
-       }
-}
-
-/*
- * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
- *
- * Called by the queue when it has posted an allocation request and
- * has freed 8 RBDs in order to restock itself.
- */
-static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
-                                    struct iwl_rx_mem_buffer
-                                    *out[RX_CLAIM_REQ_ALLOC])
-{
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rb_allocator *rba = &trans_pcie->rba;
-       int i;
-
-       if (atomic_dec_return(&rba->req_ready) < 0) {
-               atomic_inc(&rba->req_ready);
-               IWL_DEBUG_RX(trans,
-                            "Allocation request not ready, pending requests = %d\n",
-                            atomic_read(&rba->req_pending));
-               return -ENOMEM;
-       }
-
-       spin_lock(&rba->lock);
-       for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
-               /* Get next free Rx buffer, remove it from free list */
-               out[i] = list_first_entry(&rba->rbd_allocated,
-                              struct iwl_rx_mem_buffer, list);
-               list_del(&out[i]->list);
-       }
-       spin_unlock(&rba->lock);
-
-       return 0;
-}
-
-static void iwl_pcie_rx_allocator_work(struct work_struct *data)
-{
-       struct iwl_rb_allocator *rba_p =
-               container_of(data, struct iwl_rb_allocator, rx_alloc);
        struct iwl_trans_pcie *trans_pcie =
-               container_of(rba_p, struct iwl_trans_pcie, rba);
+           container_of(data, struct iwl_trans_pcie, rx_replenish);
 
-       iwl_pcie_rx_allocator(trans_pcie->trans);
+       iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
 }
 
 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
-       struct iwl_rb_allocator *rba = &trans_pcie->rba;
        struct device *dev = trans->dev;
 
        memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
 
        spin_lock_init(&rxq->lock);
-       spin_lock_init(&rba->lock);
 
        if (WARN_ON(rxq->bd || rxq->rb_stts))
                return -EINVAL;
@@ -637,49 +487,15 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);
        rxq->free_count = 0;
-       rxq->used_count = 0;
 
-       for (i = 0; i < RX_QUEUE_SIZE; i++)
+       for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
                list_add(&rxq->pool[i].list, &rxq->rx_used);
 }
 
-static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
-{
-       int i;
-
-       lockdep_assert_held(&rba->lock);
-
-       INIT_LIST_HEAD(&rba->rbd_allocated);
-       INIT_LIST_HEAD(&rba->rbd_empty);
-
-       for (i = 0; i < RX_POOL_SIZE; i++)
-               list_add(&rba->pool[i].list, &rba->rbd_empty);
-}
-
-static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
-{
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rb_allocator *rba = &trans_pcie->rba;
-       int i;
-
-       lockdep_assert_held(&rba->lock);
-
-       for (i = 0; i < RX_POOL_SIZE; i++) {
-               if (!rba->pool[i].page)
-                       continue;
-               dma_unmap_page(trans->dev, rba->pool[i].page_dma,
-                              PAGE_SIZE << trans_pcie->rx_page_order,
-                              DMA_FROM_DEVICE);
-               __free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
-               rba->pool[i].page = NULL;
-       }
-}
-
 int iwl_pcie_rx_init(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
-       struct iwl_rb_allocator *rba = &trans_pcie->rba;
        int i, err;
 
        if (!rxq->bd) {
@@ -687,21 +503,11 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
                if (err)
                        return err;
        }
-       if (!rba->alloc_wq)
-               rba->alloc_wq = alloc_workqueue("rb_allocator",
-                                               WQ_HIGHPRI | WQ_UNBOUND, 1);
-       INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
-
-       spin_lock(&rba->lock);
-       atomic_set(&rba->req_pending, 0);
-       atomic_set(&rba->req_ready, 0);
-       /* free all first - we might be reconfigured for a different size */
-       iwl_pcie_rx_free_rba(trans);
-       iwl_pcie_rx_init_rba(rba);
-       spin_unlock(&rba->lock);
 
        spin_lock(&rxq->lock);
 
+       INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
+
        /* free all first - we might be reconfigured for a different size */
        iwl_pcie_rxq_free_rbs(trans);
        iwl_pcie_rx_init_rxb_lists(rxq);
@@ -716,7 +522,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
        memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
        spin_unlock(&rxq->lock);
 
-       iwl_pcie_rx_replenish(trans);
+       iwl_pcie_rx_replenish(trans, GFP_KERNEL);
 
        iwl_pcie_rx_hw_init(trans, rxq);
 
@@ -731,7 +537,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
-       struct iwl_rb_allocator *rba = &trans_pcie->rba;
 
        /*if rxq->bd is NULL, it means that nothing has been allocated,
         * exit now */
@@ -740,15 +545,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
                return;
        }
 
-       cancel_work_sync(&rba->rx_alloc);
-       if (rba->alloc_wq) {
-               destroy_workqueue(rba->alloc_wq);
-               rba->alloc_wq = NULL;
-       }
-
-       spin_lock(&rba->lock);
-       iwl_pcie_rx_free_rba(trans);
-       spin_unlock(&rba->lock);
+       cancel_work_sync(&trans_pcie->rx_replenish);
 
        spin_lock(&rxq->lock);
        iwl_pcie_rxq_free_rbs(trans);
@@ -769,43 +566,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
        rxq->rb_stts = NULL;
 }
 
-/*
- * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
- *
- * Called when a RBD can be reused. The RBD is transferred to the allocator.
- * When there are 2 empty RBDs - a request for allocation is posted
- */
-static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
-                                 struct iwl_rx_mem_buffer *rxb,
-                                 struct iwl_rxq *rxq)
-{
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rb_allocator *rba = &trans_pcie->rba;
-
-       /* Count the used RBDs */
-       rxq->used_count++;
-
-       /* Move the RBD to the used list, will be moved to allocator in batches
-        * before claiming or posting a request*/
-       list_add_tail(&rxb->list, &rxq->rx_used);
-
-       /* If we have RX_POST_REQ_ALLOC new released rx buffers -
-        * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
-        * used for the case we failed to claim RX_CLAIM_REQ_ALLOC,
-        * after but we still need to post another request.
-        */
-       if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
-               /* Move the 2 RBDs to the allocator ownership.
-                Allocator has another 6 from pool for the request completion*/
-               spin_lock(&rba->lock);
-               list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
-               spin_unlock(&rba->lock);
-
-               atomic_inc(&rba->req_pending);
-               queue_work(rba->alloc_wq, &rba->rx_alloc);
-       }
-}
-
 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                                struct iwl_rx_mem_buffer *rxb)
 {
@@ -823,10 +583,9 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 
        while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
                struct iwl_rx_packet *pkt;
-               struct iwl_device_cmd *cmd;
                u16 sequence;
                bool reclaim;
-               int index, cmd_index, err, len;
+               int index, cmd_index, len;
                struct iwl_rx_cmd_buffer rxcb = {
                        ._offset = offset,
                        ._rx_page_order = trans_pcie->rx_page_order,
@@ -874,12 +633,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                index = SEQ_TO_INDEX(sequence);
                cmd_index = get_cmd_index(&txq->q, index);
 
-               if (reclaim)
-                       cmd = txq->entries[cmd_index].cmd;
-               else
-                       cmd = NULL;
-
-               err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
+               iwl_op_mode_rx(trans->op_mode, &rxcb);
 
                if (reclaim) {
                        kzfree(txq->entries[cmd_index].free_buf);
@@ -897,7 +651,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                         * iwl_trans_send_cmd()
                         * as we reclaim the driver command queue */
                        if (!rxcb._page_stolen)
-                               iwl_pcie_hcmd_complete(trans, &rxcb, err);
+                               iwl_pcie_hcmd_complete(trans, &rxcb);
                        else
                                IWL_WARN(trans, "Claim null rxb?\n");
                }
@@ -928,13 +682,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                         */
                        __free_pages(rxb->page, trans_pcie->rx_page_order);
                        rxb->page = NULL;
-                       iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
+                       list_add_tail(&rxb->list, &rxq->rx_used);
                } else {
                        list_add_tail(&rxb->list, &rxq->rx_free);
                        rxq->free_count++;
                }
        } else
-               iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
+               list_add_tail(&rxb->list, &rxq->rx_used);
 }
 
 /*
@@ -944,7 +698,10 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
-       u32 r, i, j;
+       u32 r, i;
+       u8 fill_rx = 0;
+       u32 count = 8;
+       int total_empty;
 
 restart:
        spin_lock(&rxq->lock);
@@ -957,6 +714,14 @@ restart:
        if (i == r)
                IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
 
+       /* calculate total frames need to be restock after handling RX */
+       total_empty = r - rxq->write_actual;
+       if (total_empty < 0)
+               total_empty += RX_QUEUE_SIZE;
+
+       if (total_empty > (RX_QUEUE_SIZE / 2))
+               fill_rx = 1;
+
        while (i != r) {
                struct iwl_rx_mem_buffer *rxb;
 
@@ -968,48 +733,29 @@ restart:
                iwl_pcie_rx_handle_rb(trans, rxb);
 
                i = (i + 1) & RX_QUEUE_MASK;
-
-               /* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
-                * try to claim the pre-allocated buffers from the allocator */
-               if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
-                       struct iwl_rb_allocator *rba = &trans_pcie->rba;
-                       struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
-
-                       /* Add the remaining 6 empty RBDs for allocator use */
-                       spin_lock(&rba->lock);
-                       list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
-                       spin_unlock(&rba->lock);
-
-                       /* If not ready - continue, will try to reclaim later.
-                       * No need to reschedule work - allocator exits only on
-                       * success */
-                       if (!iwl_pcie_rx_allocator_get(trans, out)) {
-                               /* If success - then RX_CLAIM_REQ_ALLOC
-                                * buffers were retrieved and should be added
-                                * to free list */
-                               rxq->used_count -= RX_CLAIM_REQ_ALLOC;
-                               for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
-                                       list_add_tail(&out[j]->list,
-                                                     &rxq->rx_free);
-                                       rxq->free_count++;
-                               }
+               /* If there are a lot of unused frames,
+                * restock the Rx queue so the ucode won't assert. */
+               if (fill_rx) {
+                       count++;
+                       if (count >= 8) {
+                               rxq->read = i;
+                               spin_unlock(&rxq->lock);
+                               iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
+                               count = 0;
+                               goto restart;
                        }
                }
-               /* handle restock for two cases:
-               * - we just pulled buffers from the allocator
-               * - we have 8+ unstolen pages accumulated */
-               if (rxq->free_count >=  RX_CLAIM_REQ_ALLOC) {
-                       rxq->read = i;
-                       spin_unlock(&rxq->lock);
-                       iwl_pcie_rxq_restock(trans);
-                       goto restart;
-               }
        }
 
        /* Backtrack one entry */
        rxq->read = i;
        spin_unlock(&rxq->lock);
 
+       if (fill_rx)
+               iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
+       else
+               iwl_pcie_rxq_restock(trans);
+
        if (trans_pcie->napi.poll)
                napi_gro_flush(&trans_pcie->napi, false);
 }
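
The hunk above drops the allocator hand-off and restores the older replenish
accounting: the handler measures how far write_actual trails the hardware read
index modulo the ring size and, once more than half the ring is empty,
replenishes after every eight handled buffers rather than waiting for the loop
to end. A minimal stand-alone sketch of that arithmetic, with QUEUE_SIZE as a
hypothetical stand-in for RX_QUEUE_SIZE:

        /* Editor's sketch, not driver code: the ring-lag check restored in
         * iwl_pcie_rx_handle() above. QUEUE_SIZE is assumed to be a power
         * of two, like RX_QUEUE_SIZE.
         */
        #include <stdbool.h>
        #include <stdint.h>

        #define QUEUE_SIZE 256

        static bool need_aggressive_refill(uint32_t r, uint32_t write_actual)
        {
                int total_empty = r - write_actual;     /* may wrap below 0 */

                if (total_empty < 0)
                        total_empty += QUEUE_SIZE;

                /* over half the ring empty: replenish every 8 handled RBs */
                return total_empty > QUEUE_SIZE / 2;
        }
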
@@ -1020,6 +766,7 @@ restart:
 static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int i;
 
        /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
        if (trans->cfg->internal_wimax_coex &&
@@ -1043,6 +790,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
        iwl_trans_fw_error(trans);
        local_bh_enable();
 
+       for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
+               del_timer(&trans_pcie->txq[i].stuck_timer);
+
        clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
        wake_up(&trans_pcie->wait_command_queue);
 }
@@ -1251,7 +1001,9 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 
                isr_stats->rfkill++;
 
+               mutex_lock(&trans_pcie->mutex);
                iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+               mutex_unlock(&trans_pcie->mutex);
                if (hw_rfkill) {
                        set_bit(STATUS_RFKILL, &trans->status);
                        if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
@@ -1443,8 +1195,9 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
 
        val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
 
-       val |= CSR_DRAM_INT_TBL_ENABLE;
-       val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
+       val |= CSR_DRAM_INT_TBL_ENABLE |
+              CSR_DRAM_INIT_TBL_WRAP_CHECK |
+              CSR_DRAM_INIT_TBL_WRITE_POINTER;
 
        IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
 
index 43ae658af6ec56506f9f0e8ed00022b890c3b9fb..0549c91ad3729fdedcd7da944d796674a6cec167 100644 (file)
@@ -182,7 +182,7 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
 
 static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 {
-       if (!trans->cfg->apmg_not_supported)
+       if (trans->cfg->apmg_not_supported)
                return;
 
        if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -881,6 +881,14 @@ static void iwl_pcie_apply_destination(struct iwl_trans *trans)
                case PRPH_CLEARBIT:
                        iwl_clear_bits_prph(trans, addr, BIT(val));
                        break;
+               case PRPH_BLOCKBIT:
+                       if (iwl_read_prph(trans, addr) & BIT(val)) {
+                               IWL_ERR(trans,
+                                       "BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
+                                       val, addr);
+                               goto monitor;
+                       }
+                       break;
                default:
                        IWL_ERR(trans, "FW debug - unknown OP %d\n",
                                dest->reg_ops[i].op);
@@ -888,6 +896,7 @@ static void iwl_pcie_apply_destination(struct iwl_trans *trans)
                }
        }
 
+monitor:
        if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
                iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
                               trans_pcie->fw_mon_phys >> dest->base_shift);
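
The PRPH_BLOCKBIT case added above turns the destination list into a small
interpreter that can abort firmware-debug configuration early while still
programming the monitor registers. A hedged sketch of that dispatch shape,
using invented demo_* names:

        /* Editor's sketch; the real op values and register helpers come
         * from the iwlwifi firmware-debug definitions.
         */
        enum demo_op { DEMO_SETBIT, DEMO_CLEARBIT, DEMO_BLOCKBIT };

        struct demo_reg_op {
                enum demo_op op;
                unsigned int addr;
                unsigned int val;       /* bit number */
        };

        static unsigned int demo_read(unsigned int addr)
        {
                (void)addr;
                return 0;               /* stub for the periphery read */
        }

        static void demo_apply(const struct demo_reg_op *ops, int n)
        {
                int i;

                for (i = 0; i < n; i++) {
                        switch (ops[i].op) {
                        case DEMO_SETBIT:
                        case DEMO_CLEARBIT:
                                /* program BIT(ops[i].val) at ops[i].addr */
                                break;
                        case DEMO_BLOCKBIT:
                                /* bit already set: stop applying ops but
                                 * still fall through to monitor setup */
                                if (demo_read(ops[i].addr) & (1u << ops[i].val))
                                        goto monitor;
                                break;
                        }
                }
        monitor:
                ;       /* program monitor base/end registers here */
        }
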
@@ -982,13 +991,25 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
 static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
                                   const struct fw_img *fw, bool run_in_rfkill)
 {
-       int ret;
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill;
+       int ret;
+
+       mutex_lock(&trans_pcie->mutex);
+
+       /* Someone called stop_device, don't try to start_fw */
+       if (trans_pcie->is_down) {
+               IWL_WARN(trans,
+                        "Can't start_fw since the HW hasn't been started\n");
+               ret = -EIO;
+               goto out;
+       }
 
        /* This may fail if AMT took ownership of the device */
        if (iwl_pcie_prepare_card_hw(trans)) {
                IWL_WARN(trans, "Exit HW not ready\n");
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
 
        iwl_enable_rfkill_int(trans);
@@ -1000,15 +1021,17 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
        else
                clear_bit(STATUS_RFKILL, &trans->status);
        iwl_trans_pcie_rf_kill(trans, hw_rfkill);
-       if (hw_rfkill && !run_in_rfkill)
-               return -ERFKILL;
+       if (hw_rfkill && !run_in_rfkill) {
+               ret = -ERFKILL;
+               goto out;
+       }
 
        iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
 
        ret = iwl_pcie_nic_init(trans);
        if (ret) {
                IWL_ERR(trans, "Unable to init nic\n");
-               return ret;
+               goto out;
        }
 
        /* make sure rfkill handshake bits are cleared */
@@ -1026,9 +1049,13 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 
        /* Load the given image to the HW */
        if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
-               return iwl_pcie_load_given_ucode_8000(trans, fw);
+               ret = iwl_pcie_load_given_ucode_8000(trans, fw);
        else
-               return iwl_pcie_load_given_ucode(trans, fw);
+               ret = iwl_pcie_load_given_ucode(trans, fw);
+
+out:
+       mutex_unlock(&trans_pcie->mutex);
+       return ret;
 }
 
 static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
@@ -1037,11 +1064,18 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
        iwl_pcie_tx_start(trans, scd_addr);
 }
 
-static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill, was_hw_rfkill;
 
+       lockdep_assert_held(&trans_pcie->mutex);
+
+       if (trans_pcie->is_down)
+               return;
+
+       trans_pcie->is_down = true;
+
        was_hw_rfkill = iwl_is_rfkill_set(trans);
 
        /* tell the device to stop sending interrupts */
@@ -1131,14 +1165,36 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
        iwl_pcie_prepare_card_hw(trans);
 }
 
+static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       mutex_lock(&trans_pcie->mutex);
+       _iwl_trans_pcie_stop_device(trans, low_power);
+       mutex_unlock(&trans_pcie->mutex);
+}
+
 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
 {
+       struct iwl_trans_pcie __maybe_unused *trans_pcie =
+               IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       lockdep_assert_held(&trans_pcie->mutex);
+
        if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
-               iwl_trans_pcie_stop_device(trans, true);
+               _iwl_trans_pcie_stop_device(trans, true);
 }
 
 static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
 {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+       if (trans->wowlan_d0i3) {
+               /* Enable persistence mode to avoid reset */
+               iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+                           CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+       }
+
        iwl_disable_interrupts(trans);
 
        /*
@@ -1150,17 +1206,21 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
 
        iwl_pcie_disable_ict(trans);
 
+       synchronize_irq(trans_pcie->pci_dev->irq);
+
        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
 
-       /*
-        * reset TX queues -- some of their registers reset during S3
-        * so if we don't reset everything here the D3 image would try
-        * to execute some invalid memory upon resume
-        */
-       iwl_trans_pcie_tx_reset(trans);
+       if (!trans->wowlan_d0i3) {
+               /*
+                * reset TX queues -- some of their registers reset during S3
+                * so if we don't reset everything here the D3 image would try
+                * to execute some invalid memory upon resume
+                */
+               iwl_trans_pcie_tx_reset(trans);
+       }
 
        iwl_pcie_set_pwr(trans, true);
 }
@@ -1202,12 +1262,18 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
 
        iwl_pcie_set_pwr(trans, false);
 
-       iwl_trans_pcie_tx_reset(trans);
+       if (trans->wowlan_d0i3) {
+               iwl_clear_bit(trans, CSR_GP_CNTRL,
+                             CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+       } else {
+               iwl_trans_pcie_tx_reset(trans);
 
-       ret = iwl_pcie_rx_init(trans);
-       if (ret) {
-               IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
-               return ret;
+               ret = iwl_pcie_rx_init(trans);
+               if (ret) {
+                       IWL_ERR(trans,
+                               "Failed to resume the device (RX reset)\n");
+                       return ret;
+               }
        }
 
        val = iwl_read32(trans, CSR_RESET);
@@ -1219,11 +1285,14 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
        return 0;
 }
 
-static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
 {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill;
        int err;
 
+       lockdep_assert_held(&trans_pcie->mutex);
+
        err = iwl_pcie_prepare_card_hw(trans);
        if (err) {
                IWL_ERR(trans, "Error while preparing HW: %d\n", err);
@@ -1240,20 +1309,38 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
        /* From now on, the op_mode will be kept updated about RF kill state */
        iwl_enable_rfkill_int(trans);
 
+       /* Set is_down to false here so that... */
+       trans_pcie->is_down = false;
+
        hw_rfkill = iwl_is_rfkill_set(trans);
        if (hw_rfkill)
                set_bit(STATUS_RFKILL, &trans->status);
        else
                clear_bit(STATUS_RFKILL, &trans->status);
+       /* ... rfkill can call stop_device and set it false if needed */
        iwl_trans_pcie_rf_kill(trans, hw_rfkill);
 
        return 0;
 }
 
+static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int ret;
+
+       mutex_lock(&trans_pcie->mutex);
+       ret = _iwl_trans_pcie_start_hw(trans, low_power);
+       mutex_unlock(&trans_pcie->mutex);
+
+       return ret;
+}
+
 static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
+       mutex_lock(&trans_pcie->mutex);
+
        /* disable interrupts - don't enable HW RF kill interrupt */
        spin_lock(&trans_pcie->irq_lock);
        iwl_disable_interrupts(trans);
@@ -1266,6 +1353,10 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
        spin_unlock(&trans_pcie->irq_lock);
 
        iwl_pcie_disable_ict(trans);
+
+       mutex_unlock(&trans_pcie->mutex);
+
+       synchronize_irq(trans_pcie->pci_dev->irq);
 }
 
 static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@ -1326,6 +1417,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
        else
                trans_pcie->rx_page_order = get_order(4 * 1024);
 
+       trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
        trans_pcie->command_names = trans_cfg->command_names;
        trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
        trans_pcie->scd_set_active = trans_cfg->scd_set_active;
@@ -2459,7 +2551,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        struct iwl_trans_pcie *trans_pcie;
        struct iwl_trans *trans;
        u16 pci_cmd;
-       int err;
+       int ret;
 
        trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
                                &pdev->dev, cfg, &trans_ops_pcie, 0);
@@ -2472,10 +2564,11 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        spin_lock_init(&trans_pcie->irq_lock);
        spin_lock_init(&trans_pcie->reg_lock);
        spin_lock_init(&trans_pcie->ref_lock);
+       mutex_init(&trans_pcie->mutex);
        init_waitqueue_head(&trans_pcie->ucode_write_waitq);
 
-       err = pci_enable_device(pdev);
-       if (err)
+       ret = pci_enable_device(pdev);
+       if (ret)
                goto out_no_pci;
 
        if (!cfg->base_params->pcie_l1_allowed) {
@@ -2491,23 +2584,23 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 
        pci_set_master(pdev);
 
-       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
-       if (!err)
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
-       if (err) {
-               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (!err)
-                       err = pci_set_consistent_dma_mask(pdev,
+       ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+       if (!ret)
+               ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+       if (ret) {
+               ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (!ret)
+                       ret = pci_set_consistent_dma_mask(pdev,
                                                          DMA_BIT_MASK(32));
                /* both attempts failed: */
-               if (err) {
+               if (ret) {
                        dev_err(&pdev->dev, "No suitable DMA available\n");
                        goto out_pci_disable_device;
                }
        }
 
-       err = pci_request_regions(pdev, DRV_NAME);
-       if (err) {
+       ret = pci_request_regions(pdev, DRV_NAME);
+       if (ret) {
                dev_err(&pdev->dev, "pci_request_regions failed\n");
                goto out_pci_disable_device;
        }
@@ -2515,7 +2608,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
        if (!trans_pcie->hw_base) {
                dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
-               err = -ENODEV;
+               ret = -ENODEV;
                goto out_pci_release_regions;
        }
 
@@ -2527,9 +2620,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        trans_pcie->pci_dev = pdev;
        iwl_disable_interrupts(trans);
 
-       err = pci_enable_msi(pdev);
-       if (err) {
-               dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
+       ret = pci_enable_msi(pdev);
+       if (ret) {
+               dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
                /* enable rfkill interrupt: hw bug w/a */
                pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
                if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
@@ -2547,11 +2640,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
         */
        if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
                unsigned long flags;
-               int ret;
 
                trans->hw_rev = (trans->hw_rev & 0xfff0) |
                                (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
 
+               ret = iwl_pcie_prepare_card_hw(trans);
+               if (ret) {
+                       IWL_WARN(trans, "Exit HW not ready\n");
+                       goto out_pci_disable_msi;
+               }
+
                /*
                 * in-order to recognize C step driver should read chip version
                 * id located at the AUX bus MISC address space.
@@ -2591,13 +2689,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        /* Initialize the wait queue for commands */
        init_waitqueue_head(&trans_pcie->wait_command_queue);
 
-       if (iwl_pcie_alloc_ict(trans))
+       ret = iwl_pcie_alloc_ict(trans);
+       if (ret)
                goto out_pci_disable_msi;
 
-       err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
+       ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
                                   iwl_pcie_irq_handler,
                                   IRQF_SHARED, DRV_NAME, trans);
-       if (err) {
+       if (ret) {
                IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
                goto out_free_ict;
        }
@@ -2617,5 +2716,5 @@ out_pci_disable_device:
        pci_disable_device(pdev);
 out_no_pci:
        iwl_trans_free(trans);
-       return ERR_PTR(err);
+       return ERR_PTR(ret);
 }
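
The recurring pattern in this file's hunks is a new trans_pcie->mutex that
serializes start_hw, start_fw and stop_device, with _-prefixed internals that
only assert the lock, so rfkill handling can be called from a context that
already holds it. A minimal sketch of the convention, with illustrative
demo_* names:

        /* Editor's sketch of the locking convention adopted above; the
         * demo_* names stand in for the driver's own symbols.
         */
        #include <linux/lockdep.h>
        #include <linux/mutex.h>
        #include <linux/types.h>

        struct demo_trans {
                struct mutex mutex;
                bool is_down;
        };

        static void _demo_stop_device(struct demo_trans *t)
        {
                lockdep_assert_held(&t->mutex);

                if (t->is_down)         /* second stop is a no-op */
                        return;
                t->is_down = true;
                /* ... actual teardown ... */
        }

        static void demo_stop_device(struct demo_trans *t)
        {
                mutex_lock(&t->mutex);
                _demo_stop_device(t);
                mutex_unlock(&t->mutex);
        }
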
index 2b86c2135de36f627b397add88628bc47aa37271..601eee1ad60b3c9771724678ae17e0a8a6a54f8a 100644 (file)
@@ -219,8 +219,6 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 
        scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
 
-       WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
-
        sta_id = tx_cmd->sta_id;
        sec_ctl = tx_cmd->sec_ctl;
 
@@ -239,6 +237,9 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
        if (trans_pcie->bc_table_dword)
                len = DIV_ROUND_UP(len, 4);
 
+       if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
+               return;
+
        bc_ent = cpu_to_le16(len | (sta_id << 12));
 
        scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
@@ -915,6 +916,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
                }
        }
 
+       iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
        if (trans->cfg->base_params->num_of_queues > 20)
                iwl_set_bits_prph(trans, SCD_GP_CTRL,
                                  SCD_GP_CTRL_ENABLE_31_QUEUES);
@@ -1320,13 +1322,24 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
        int idx;
        u16 copy_size, cmd_size, scratch_size;
        bool had_nocopy = false;
+       u8 group_id = iwl_cmd_groupid(cmd->id);
        int i, ret;
        u32 cmd_pos;
        const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
        u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
 
-       copy_size = sizeof(out_cmd->hdr);
-       cmd_size = sizeof(out_cmd->hdr);
+       if (WARN(!trans_pcie->wide_cmd_header &&
+                group_id > IWL_ALWAYS_LONG_GROUP,
+                "unsupported wide command %#x\n", cmd->id))
+               return -EINVAL;
+
+       if (group_id != 0) {
+               copy_size = sizeof(struct iwl_cmd_header_wide);
+               cmd_size = sizeof(struct iwl_cmd_header_wide);
+       } else {
+               copy_size = sizeof(struct iwl_cmd_header);
+               cmd_size = sizeof(struct iwl_cmd_header);
+       }
 
        /* need one for the header if the first is NOCOPY */
        BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
@@ -1416,16 +1429,32 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                out_meta->source = cmd;
 
        /* set up the header */
-
-       out_cmd->hdr.cmd = cmd->id;
-       out_cmd->hdr.flags = 0;
-       out_cmd->hdr.sequence =
-               cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
-                                        INDEX_TO_SEQ(q->write_ptr));
+       if (group_id != 0) {
+               out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
+               out_cmd->hdr_wide.group_id = group_id;
+               out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
+               out_cmd->hdr_wide.length =
+                       cpu_to_le16(cmd_size -
+                                   sizeof(struct iwl_cmd_header_wide));
+               out_cmd->hdr_wide.reserved = 0;
+               out_cmd->hdr_wide.sequence =
+                       cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+                                                INDEX_TO_SEQ(q->write_ptr));
+
+               cmd_pos = sizeof(struct iwl_cmd_header_wide);
+               copy_size = sizeof(struct iwl_cmd_header_wide);
+       } else {
+               out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
+               out_cmd->hdr.sequence =
+                       cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+                                                INDEX_TO_SEQ(q->write_ptr));
+               out_cmd->hdr.group_id = 0;
+
+               cmd_pos = sizeof(struct iwl_cmd_header);
+               copy_size = sizeof(struct iwl_cmd_header);
+       }
 
        /* and copy the data that needs to be copied */
-       cmd_pos = offsetof(struct iwl_device_cmd, payload);
-       copy_size = sizeof(out_cmd->hdr);
        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                int copy;
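
The header-setup hunk above gives the command path two layouts: group 0 keeps
the legacy iwl_cmd_header, while any other group id gets iwl_cmd_header_wide
with explicit group, version and length fields, and cmd_pos/copy_size start
past whichever header was chosen. A hedged sketch of the size selection,
under an assumed (not iwlwifi's actual) id packing:

        /* Editor's sketch; the demo_* id layout is illustrative only. The
         * driver's real iwl_cmd_groupid()/iwl_cmd_opcode()/iwl_cmd_version()
         * helpers are defined elsewhere in iwlwifi.
         */
        #include <stddef.h>
        #include <stdint.h>

        struct demo_hdr {
                uint8_t cmd;
                uint8_t group_id;
                uint16_t sequence;
        };

        struct demo_hdr_wide {
                uint8_t cmd;
                uint8_t group_id;
                uint16_t sequence;
                uint16_t length;
                uint8_t reserved;
                uint8_t version;
        };

        /* hypothetical id layout: version | group | opcode */
        static inline uint8_t demo_groupid(uint32_t id)
        {
                return (id >> 8) & 0xff;
        }

        static size_t demo_hdr_size(uint32_t id)
        {
                return demo_groupid(id) ? sizeof(struct demo_hdr_wide)
                                        : sizeof(struct demo_hdr);
        }
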
 
@@ -1464,9 +1493,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
        }
 
        IWL_DEBUG_HC(trans,
-                    "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
+                    "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
                     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
-                    out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
+                    group_id, out_cmd->hdr.cmd,
+                    le16_to_cpu(out_cmd->hdr.sequence),
                     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
 
        /* start the TFD with the scratchbuf */
@@ -1521,7 +1551,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                kzfree(txq->entries[idx].free_buf);
        txq->entries[idx].free_buf = dup_buf;
 
-       trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
+       trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
 
        /* start timer if queue currently empty */
        if (q->read_ptr == q->write_ptr && txq->wd_timeout)
@@ -1552,15 +1582,13 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 /*
  * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
  * @rxb: Rx buffer to reclaim
- * @handler_status: return value of the handler of the command
- *     (put in setup_rx_handlers)
  *
  * If an Rx buffer has an async callback associated with it the callback
  * will be executed.  The attached skb (if present) will only be freed
  * if the callback returns 1
  */
 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
-                           struct iwl_rx_cmd_buffer *rxb, int handler_status)
+                           struct iwl_rx_cmd_buffer *rxb)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -1599,7 +1627,6 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
                meta->source->resp_pkt = pkt;
                meta->source->_rx_page_addr = (unsigned long)page_address(p);
                meta->source->_rx_page_order = trans_pcie->rx_page_order;
-               meta->source->handler_status = handler_status;
        }
 
        iwl_pcie_cmdq_reclaim(trans, txq_id, index);
index 7217da4f1543aed26d965ae7efed4da0f402765b..57a80cfa39b1b50743f3e1041efff37eda1bb913 100644 (file)
@@ -112,7 +112,9 @@ static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
        if (!skb)
                return;
 
-       ieee80211_rx_ni(dev->hw, skb);
+       spin_lock(&dev->mac_lock);
+       ieee80211_rx(dev->hw, skb);
+       spin_unlock(&dev->mac_lock);
 }
 
 static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
@@ -236,23 +238,42 @@ static void mt7601u_complete_tx(struct urb *urb)
        skb = q->e[q->start].skb;
        trace_mt_tx_dma_done(dev, skb);
 
-       mt7601u_tx_status(dev, skb);
+       __skb_queue_tail(&dev->tx_skb_done, skb);
+       tasklet_schedule(&dev->tx_tasklet);
 
        if (q->used == q->entries - q->entries / 8)
                ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));
 
        q->start = (q->start + 1) % q->entries;
        q->used--;
+out:
+       spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
 
-       if (urb->status)
-               goto out;
+static void mt7601u_tx_tasklet(unsigned long data)
+{
+       struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
+       struct sk_buff_head skbs;
+       unsigned long flags;
+
+       __skb_queue_head_init(&skbs);
+
+       spin_lock_irqsave(&dev->tx_lock, flags);
 
        set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
        if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
                queue_delayed_work(dev->stat_wq, &dev->stat_work,
                                   msecs_to_jiffies(10));
-out:
+
+       skb_queue_splice_init(&dev->tx_skb_done, &skbs);
+
        spin_unlock_irqrestore(&dev->tx_lock, flags);
+
+       while (!skb_queue_empty(&skbs)) {
+               struct sk_buff *skb = __skb_dequeue(&skbs);
+
+               mt7601u_tx_status(dev, skb);
+       }
 }
 
 static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
@@ -475,6 +496,7 @@ int mt7601u_dma_init(struct mt7601u_dev *dev)
 {
        int ret = -ENOMEM;
 
+       tasklet_init(&dev->tx_tasklet, mt7601u_tx_tasklet, (unsigned long) dev);
        tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);
 
        ret = mt7601u_alloc_tx(dev);
@@ -502,4 +524,6 @@ void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
 
        mt7601u_free_rx(dev);
        mt7601u_free_tx(dev);
+
+       tasklet_kill(&dev->tx_tasklet);
 }
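
These dma.c hunks stop reporting TX status straight from the URB completion
handler: completions now only queue the skb on tx_skb_done and schedule a
tasklet, which drains the list in softirq context where taking mac_lock and
calling back into mac80211 is safe. A hedged sketch of that hand-off, with
illustrative demo_* names:

        /* Editor's sketch of the defer-to-tasklet pattern; the driver's
         * real fields appear in the hunks above.
         */
        #include <linux/interrupt.h>
        #include <linux/skbuff.h>
        #include <linux/spinlock.h>

        struct demo_dev {
                spinlock_t lock;
                struct sk_buff_head done;       /* filled from hard irq */
                struct tasklet_struct tasklet;
        };

        static void demo_complete(struct demo_dev *dev, struct sk_buff *skb)
        {
                unsigned long flags;

                spin_lock_irqsave(&dev->lock, flags);
                __skb_queue_tail(&dev->done, skb);  /* no status work here */
                spin_unlock_irqrestore(&dev->lock, flags);
                tasklet_schedule(&dev->tasklet);
        }

        static void demo_tasklet(unsigned long data)
        {
                struct demo_dev *dev = (struct demo_dev *)data;
                struct sk_buff_head skbs;
                struct sk_buff *skb;
                unsigned long flags;

                __skb_queue_head_init(&skbs);
                spin_lock_irqsave(&dev->lock, flags);
                skb_queue_splice_init(&dev->done, &skbs);
                spin_unlock_irqrestore(&dev->lock, flags);

                while ((skb = __skb_dequeue(&skbs)))
                        dev_kfree_skb(skb);     /* stand-in for the report */
        }
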
index df3dd56199a7ec8a43202c8d40b8d919df846006..26190fd33407bc5e0e33eff25d223d1956ebb16e 100644 (file)
@@ -454,8 +454,10 @@ struct mt7601u_dev *mt7601u_alloc_device(struct device *pdev)
        spin_lock_init(&dev->tx_lock);
        spin_lock_init(&dev->rx_lock);
        spin_lock_init(&dev->lock);
+       spin_lock_init(&dev->mac_lock);
        spin_lock_init(&dev->con_mon_lock);
        atomic_set(&dev->avg_ampdu_len, 1);
+       skb_queue_head_init(&dev->tx_skb_done);
 
        dev->stat_wq = alloc_workqueue("mt7601u", WQ_UNBOUND, 0);
        if (!dev->stat_wq) {
index 7514bce1ac91dfa61bfe82e822c92eac57f1a129..e21c53ed09fb902b91793b792f21762779e89345 100644 (file)
@@ -181,7 +181,11 @@ void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat)
        }
 
        mt76_mac_fill_tx_status(dev, &info, stat);
+
+       spin_lock_bh(&dev->mac_lock);
        ieee80211_tx_status_noskb(dev->hw, sta, &info);
+       spin_unlock_bh(&dev->mac_lock);
+
        rcu_read_unlock();
 }
 
index 9102be6b95cb70bb51f4ce34864549046e922605..428bd2f10b7b3450a2afbdafd988254db44920c7 100644 (file)
@@ -141,12 +141,13 @@ enum {
 /**
  * struct mt7601u_dev - adapter structure
  * @lock:              protects @wcid->tx_rate.
+ * @mac_lock:          locks out mac80211's tx status and rx paths.
  * @tx_lock:           protects @tx_q and changes of MT7601U_STATE_*_STATS
 *                     flags in @state.
  * @rx_lock:           protects @rx_q.
  * @con_mon_lock:      protects @ap_bssid, @bcn_*, @avg_rssi.
  * @mutex:             ensures exclusive access from mac80211 callbacks.
- * @vendor_req_mutex:  ensures atomicity of vendor requests.
+ * @vendor_req_mutex:  protects @vend_buf, ensures atomicity of split writes.
  * @reg_atomic_mutex:  ensures atomicity of indirect register accesses
  *                     (accesses to RF and BBP).
  * @hw_atomic_mutex:   ensures exclusive access to HW during critical
@@ -177,6 +178,7 @@ struct mt7601u_dev {
        struct mt76_wcid __rcu *wcid[N_WCIDS];
 
        spinlock_t lock;
+       spinlock_t mac_lock;
 
        const u16 *beacon_offsets;
 
@@ -184,6 +186,8 @@ struct mt7601u_dev {
        struct mt7601u_eeprom_params *ee;
 
        struct mutex vendor_req_mutex;
+       void *vend_buf;
+
        struct mutex reg_atomic_mutex;
        struct mutex hw_atomic_mutex;
 
@@ -197,7 +201,9 @@ struct mt7601u_dev {
 
        /* TX */
        spinlock_t tx_lock;
+       struct tasklet_struct tx_tasklet;
        struct mt7601u_tx_queue *tx_q;
+       struct sk_buff_head tx_skb_done;
 
        atomic_t avg_ampdu_len;
 
index 0be2080ceab387ef9e21c1849b1e96e4eb635309..a0a33dc8f6bcbd31c1b58012f6811bf6c9d19720 100644 (file)
@@ -116,7 +116,10 @@ void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
        ieee80211_tx_info_clear_status(info);
        info->status.rates[0].idx = -1;
        info->flags |= IEEE80211_TX_STAT_ACK;
+
+       spin_lock(&dev->mac_lock);
        ieee80211_tx_status(dev->hw, skb);
+       spin_unlock(&dev->mac_lock);
 }
 
 static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
index 54dba400186511bf2842972f631bd2dde51c5a46..416c6045ff3128005664fd8600d97c3cba9245a9 100644 (file)
@@ -92,10 +92,9 @@ void mt7601u_complete_urb(struct urb *urb)
        complete(cmpl);
 }
 
-static int
-__mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
-                        const u8 direction, const u16 val, const u16 offset,
-                        void *buf, const size_t buflen)
+int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+                          const u8 direction, const u16 val, const u16 offset,
+                          void *buf, const size_t buflen)
 {
        int i, ret;
        struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
@@ -110,6 +109,8 @@ __mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
                trace_mt_vend_req(dev, pipe, req, req_type, val, offset,
                                  buf, buflen, ret);
 
+               if (ret == -ENODEV)
+                       set_bit(MT7601U_STATE_REMOVED, &dev->state);
                if (ret >= 0 || ret == -ENODEV)
                        return ret;
 
@@ -122,25 +123,6 @@ __mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
        return ret;
 }
 
-int
-mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
-                      const u8 direction, const u16 val, const u16 offset,
-                      void *buf, const size_t buflen)
-{
-       int ret;
-
-       mutex_lock(&dev->vendor_req_mutex);
-
-       ret = __mt7601u_vendor_request(dev, req, direction, val, offset,
-                                      buf, buflen);
-       if (ret == -ENODEV)
-               set_bit(MT7601U_STATE_REMOVED, &dev->state);
-
-       mutex_unlock(&dev->vendor_req_mutex);
-
-       return ret;
-}
-
 void mt7601u_vendor_reset(struct mt7601u_dev *dev)
 {
        mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
@@ -150,19 +132,21 @@ void mt7601u_vendor_reset(struct mt7601u_dev *dev)
 u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset)
 {
        int ret;
-       __le32 reg;
-       u32 val;
+       u32 val = ~0;
 
        WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset);
 
+       mutex_lock(&dev->vendor_req_mutex);
+
        ret = mt7601u_vendor_request(dev, MT_VEND_MULTI_READ, USB_DIR_IN,
-                                    0, offset, &reg, sizeof(reg));
-       val = le32_to_cpu(reg);
-       if (ret > 0 && ret != sizeof(reg)) {
+                                    0, offset, dev->vend_buf, MT_VEND_BUF);
+       if (ret == MT_VEND_BUF)
+               val = get_unaligned_le32(dev->vend_buf);
+       else if (ret > 0)
                dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n",
                        ret, offset);
-               val = ~0;
-       }
+
+       mutex_unlock(&dev->vendor_req_mutex);
 
        trace_reg_read(dev, offset, val);
        return val;
@@ -173,12 +157,17 @@ int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req,
 {
        int ret;
 
+       mutex_lock(&dev->vendor_req_mutex);
+
        ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
                                     val & 0xffff, offset, NULL, 0);
-       if (ret)
-               return ret;
-       return mt7601u_vendor_request(dev, req, USB_DIR_OUT,
-                                     val >> 16, offset + 2, NULL, 0);
+       if (!ret)
+               ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
+                                            val >> 16, offset + 2, NULL, 0);
+
+       mutex_unlock(&dev->vendor_req_mutex);
+
+       return ret;
 }
 
 void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val)
@@ -275,6 +264,12 @@ static int mt7601u_probe(struct usb_interface *usb_intf,
 
        usb_set_intfdata(usb_intf, dev);
 
+       dev->vend_buf = devm_kmalloc(dev->dev, MT_VEND_BUF, GFP_KERNEL);
+       if (!dev->vend_buf) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
        ret = mt7601u_assign_pipes(usb_intf, dev);
        if (ret)
                goto err;
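
usb.c now keeps vendor_req_mutex inside the accessors and reads into vend_buf,
allocated with devm_kmalloc(): USB control transfers need DMA-able memory, so
the previous on-stack __le32 was unsafe, and get_unaligned_le32() extracts the
value. A sketch of the resulting read path, assuming illustrative demo_* names
and a hypothetical bRequest value:

        /* Editor's sketch; demo_* names and DEMO_VEND_READ are invented. */
        #include <asm/unaligned.h>
        #include <linux/mutex.h>
        #include <linux/usb.h>

        #define DEMO_VEND_READ  0x47    /* hypothetical bRequest */

        struct demo_dev {
                struct usb_device *udev;
                struct mutex vendor_req_mutex;
                void *vend_buf;         /* devm_kmalloc'ed, DMA-safe */
        };

        static u32 demo_rr(struct demo_dev *dev, u16 offset)
        {
                u32 val = ~0;
                int ret;

                mutex_lock(&dev->vendor_req_mutex);
                ret = usb_control_msg(dev->udev,
                                      usb_rcvctrlpipe(dev->udev, 0),
                                      DEMO_VEND_READ,
                                      USB_DIR_IN | USB_TYPE_VENDOR |
                                      USB_RECIP_DEVICE,
                                      0, offset, dev->vend_buf,
                                      sizeof(__le32), 500 /* ms */);
                if (ret == sizeof(__le32))
                        val = get_unaligned_le32(dev->vend_buf);
                mutex_unlock(&dev->vendor_req_mutex);

                return val;
        }
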
index 49e188fa37983788b99bcd082e82ef8f5a43e4f7..bc182022b9d6398ed748ce7b3b7153c9831d8d4a 100644 (file)
@@ -23,6 +23,8 @@
 
 #define MT_VEND_DEV_MODE_RESET 1
 
+#define MT_VEND_BUF            sizeof(__le32)
+
 enum mt_vendor_req {
        MT_VEND_DEV_MODE = 1,
        MT_VEND_WRITE = 2,
index 48edf387683ebbd79a98f5257689f816378be3cf..317d99189556ab1c3025bf11afd7b61449fd124e 100644 (file)
@@ -9,36 +9,36 @@ config MWIFIEX
          mwifiex.
 
 config MWIFIEX_SDIO
-       tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897"
+       tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8997"
        depends on MWIFIEX && MMC
        select FW_LOADER
        select WANT_DEV_COREDUMP
        ---help---
          This adds support for wireless adapters based on Marvell
-         8786/8787/8797/8887/8897 chipsets with SDIO interface.
+         8786/8787/8797/8887/8897/8997 chipsets with SDIO interface.
 
          If you choose to build it as a module, it will be called
          mwifiex_sdio.
 
 config MWIFIEX_PCIE
-       tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897"
+       tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897/8997"
        depends on MWIFIEX && PCI
        select FW_LOADER
        select WANT_DEV_COREDUMP
        ---help---
          This adds support for wireless adapters based on Marvell
-         8766/8897 chipsets with PCIe interface.
+         8766/8897/8997 chipsets with PCIe interface.
 
          If you choose to build it as a module, it will be called
          mwifiex_pcie.
 
 config MWIFIEX_USB
-       tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897"
+       tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897/8997"
        depends on MWIFIEX && USB
        select FW_LOADER
        ---help---
          This adds support for wireless adapters based on Marvell
-         8797/8897 chipset with USB interface.
+         8797/8897/8997 chipset with USB interface.
 
          If you choose to build it as a module, it will be called
          mwifiex_usb.
index b15e4c7acbecd2ed44b1e76360464ef23f3fd9f3..ff63cb5632eb089a817422dddfa9dda0069b2478 100644 (file)
@@ -19,6 +19,7 @@
 
 #include "cfg80211.h"
 #include "main.h"
+#include "11n.h"
 
 static char *reg_alpha2;
 module_param(reg_alpha2, charp, 0);
@@ -34,12 +35,38 @@ static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
        },
 };
 
-static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = {
+static const struct ieee80211_iface_combination
+mwifiex_iface_comb_ap_sta = {
        .limits = mwifiex_ap_sta_limits,
        .num_different_channels = 1,
        .n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
        .max_interfaces = MWIFIEX_MAX_BSS_NUM,
        .beacon_int_infra_match = true,
+       .radar_detect_widths =  BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+                               BIT(NL80211_CHAN_WIDTH_20) |
+                               BIT(NL80211_CHAN_WIDTH_40),
+};
+
+static const struct ieee80211_iface_combination
+mwifiex_iface_comb_ap_sta_vht = {
+       .limits = mwifiex_ap_sta_limits,
+       .num_different_channels = 1,
+       .n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
+       .max_interfaces = MWIFIEX_MAX_BSS_NUM,
+       .beacon_int_infra_match = true,
+       .radar_detect_widths =  BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+                               BIT(NL80211_CHAN_WIDTH_20) |
+                               BIT(NL80211_CHAN_WIDTH_40) |
+                               BIT(NL80211_CHAN_WIDTH_80),
+};
+
+static const struct
+ieee80211_iface_combination mwifiex_iface_comb_ap_sta_drcs = {
+       .limits = mwifiex_ap_sta_limits,
+       .num_different_channels = 2,
+       .n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
+       .max_interfaces = MWIFIEX_MAX_BSS_NUM,
+       .beacon_int_infra_match = true,
 };
 
 /*
@@ -441,7 +468,7 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
  *      - Country codes
  *      - Sub bands (first channel, number of channels, maximum Tx power)
  */
-static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
+int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
 {
        u8 no_of_triplet = 0;
        struct ieee80211_country_ie_triplet *t;
@@ -804,10 +831,13 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
                priv->bss_type = MWIFIEX_BSS_TYPE_STA;
                break;
        case NL80211_IFTYPE_P2P_CLIENT:
-       case NL80211_IFTYPE_P2P_GO:
                priv->bss_role =  MWIFIEX_BSS_ROLE_STA;
                priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
                break;
+       case NL80211_IFTYPE_P2P_GO:
+               priv->bss_role =  MWIFIEX_BSS_ROLE_UAP;
+               priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
+               break;
        case NL80211_IFTYPE_AP:
                priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
                priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
@@ -1115,8 +1145,10 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
        case NL80211_IFTYPE_P2P_GO:
                switch (type) {
                case NL80211_IFTYPE_STATION:
-                       if (mwifiex_cfg80211_init_p2p_client(priv))
+                       if (mwifiex_cfg80211_deinit_p2p(priv))
                                return -EFAULT;
+                       priv->adapter->curr_iface_comb.p2p_intf--;
+                       priv->adapter->curr_iface_comb.sta_intf++;
                        dev->ieee80211_ptr->iftype = type;
                        break;
                case NL80211_IFTYPE_ADHOC:
@@ -2788,6 +2820,7 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
 {
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
        struct mwifiex_adapter *adapter = priv->adapter;
+       struct sk_buff *skb, *tmp;
 
 #ifdef CONFIG_DEBUG_FS
        mwifiex_dev_debugfs_remove(priv);
@@ -2795,6 +2828,9 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
 
        mwifiex_stop_net_dev_queue(priv->netdev, adapter);
 
+       skb_queue_walk_safe(&priv->bypass_txq, skb, tmp)
+               mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+
        if (netif_carrier_ok(priv->netdev))
                netif_carrier_off(priv->netdev);
 
@@ -2954,7 +2990,6 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
                                        MWIFIEX_MEF_MAX_BYTESEQ)) {
                        mwifiex_dbg(priv->adapter, ERROR,
                                    "Pattern not supported\n");
-                       kfree(mef_entry);
                        return -EOPNOTSUPP;
                }
 
@@ -3036,9 +3071,12 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
 
        mwifiex_set_auto_arp_mef_entry(priv, &mef_entry[0]);
 
-       if (wowlan->n_patterns || wowlan->magic_pkt)
+       if (wowlan->n_patterns || wowlan->magic_pkt) {
                ret = mwifiex_set_wowlan_mef_entry(priv, &mef_cfg,
                                                   &mef_entry[1], wowlan);
+               if (ret)
+                       goto err;
+       }
 
        if (!mef_cfg.criteria)
                mef_cfg.criteria = MWIFIEX_CRITERIA_BROADCAST |
@@ -3048,6 +3086,8 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
        ret = mwifiex_send_cmd(priv, HostCmd_CMD_MEF_CFG,
                        HostCmd_ACT_GEN_SET, 0,
                        &mef_cfg, true);
+
+err:
        kfree(mef_entry);
        return ret;
 }
@@ -3359,6 +3399,72 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
        return mwifiex_tdls_oper(priv, peer, action);
 }
 
+static int
+mwifiex_cfg80211_tdls_chan_switch(struct wiphy *wiphy, struct net_device *dev,
+                                 const u8 *addr, u8 oper_class,
+                                 struct cfg80211_chan_def *chandef)
+{
+       struct mwifiex_sta_node *sta_ptr;
+       unsigned long flags;
+       u16 chan;
+       u8 second_chan_offset, band;
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+       sta_ptr = mwifiex_get_sta_entry(priv, addr);
+       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+       if (!sta_ptr) {
+               wiphy_err(wiphy, "%s: Invalid TDLS peer %pM\n",
+                         __func__, addr);
+               return -ENOENT;
+       }
+
+       if (!(sta_ptr->tdls_cap.extcap.ext_capab[3] &
+             WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH)) {
+               wiphy_err(wiphy, "%pM do not support tdls cs\n", addr);
+               return -ENOENT;
+       }
+
+       if (sta_ptr->tdls_status == TDLS_CHAN_SWITCHING ||
+           sta_ptr->tdls_status == TDLS_IN_OFF_CHAN) {
+               wiphy_err(wiphy, "channel switch is running, abort request\n");
+               return -EALREADY;
+       }
+
+       chan = chandef->chan->hw_value;
+       second_chan_offset = mwifiex_get_sec_chan_offset(chan);
+       band = chandef->chan->band;
+       mwifiex_start_tdls_cs(priv, addr, chan, second_chan_offset, band);
+
+       return 0;
+}
+
+static void
+mwifiex_cfg80211_tdls_cancel_chan_switch(struct wiphy *wiphy,
+                                        struct net_device *dev,
+                                        const u8 *addr)
+{
+       struct mwifiex_sta_node *sta_ptr;
+       unsigned long flags;
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+       sta_ptr = mwifiex_get_sta_entry(priv, addr);
+       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+       if (!sta_ptr) {
+               wiphy_err(wiphy, "%s: Invalid TDLS peer %pM\n",
+                         __func__, addr);
+       } else if (!(sta_ptr->tdls_status == TDLS_CHAN_SWITCHING ||
+                    sta_ptr->tdls_status == TDLS_IN_BASE_CHAN ||
+                    sta_ptr->tdls_status == TDLS_IN_OFF_CHAN)) {
+               wiphy_err(wiphy, "tdls chan switch not initialize by %pM\n",
+                         addr);
+       } else
+               mwifiex_stop_tdls_cs(priv, addr);
+}
+
 static int
 mwifiex_cfg80211_add_station(struct wiphy *wiphy, struct net_device *dev,
                             const u8 *mac, struct station_parameters *params)
@@ -3575,6 +3681,8 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
        .set_coalesce = mwifiex_cfg80211_set_coalesce,
        .tdls_mgmt = mwifiex_cfg80211_tdls_mgmt,
        .tdls_oper = mwifiex_cfg80211_tdls_oper,
+       .tdls_channel_switch = mwifiex_cfg80211_tdls_chan_switch,
+       .tdls_cancel_channel_switch = mwifiex_cfg80211_tdls_cancel_chan_switch,
        .add_station = mwifiex_cfg80211_add_station,
        .change_station = mwifiex_cfg80211_change_station,
        .get_channel = mwifiex_cfg80211_get_channel,
@@ -3672,7 +3780,12 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
        else
                wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
 
-       wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta;
+       if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
+               wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs;
+       else if (adapter->is_hw_11ac_capable)
+               wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_vht;
+       else
+               wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta;
        wiphy->n_iface_combinations = 1;
 
        /* Initialize cipher suits */
@@ -3709,6 +3822,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
                           NL80211_FEATURE_INACTIVITY_TIMER |
                           NL80211_FEATURE_NEED_OBSS_SCAN;
 
+       if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
+               wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
+
        if (adapter->fw_api_ver == MWIFIEX_FW_V15)
                wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
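
Because n_iface_combinations stays at 1, the registration hunk above picks
exactly one table at runtime: the two-channel table when the firmware
advertises DRCS, the VHT table with 80 MHz radar-detection widths on 11ac
hardware, and the legacy table otherwise. For reference, a hedged sketch of
such a combination entry (the demo_* limits are illustrative, not mwifiex's):

        /* Editor's sketch of a single-channel AP/STA combination carrying
         * radar_detect_widths, mirroring mwifiex_iface_comb_ap_sta_vht.
         */
        #include <linux/kernel.h>
        #include <net/cfg80211.h>

        static const struct ieee80211_iface_limit demo_limits[] = {
                { .max = 2, .types = BIT(NL80211_IFTYPE_STATION) },
                { .max = 1, .types = BIT(NL80211_IFTYPE_AP) },
        };

        static const struct ieee80211_iface_combination demo_comb = {
                .limits = demo_limits,
                .n_limits = ARRAY_SIZE(demo_limits),
                .num_different_channels = 1,
                .max_interfaces = 3,
                .beacon_int_infra_match = true,
                .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
                                       BIT(NL80211_CHAN_WIDTH_20) |
                                       BIT(NL80211_CHAN_WIDTH_40) |
                                       BIT(NL80211_CHAN_WIDTH_80),
        };
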
 
index 207da40500f4309fcdd0405e99365f4aa274f502..45ae38e32621805edb24168963a6ee915754148c 100644 (file)
@@ -167,8 +167,6 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
                mwifiex_dbg(adapter, ERROR,
                            "DNLD_CMD: FW in reset state, ignore cmd %#x\n",
                        cmd_code);
-               if (cmd_node->wait_q_enabled)
-                       mwifiex_complete_cmd(adapter, cmd_node);
                mwifiex_recycle_cmd_node(adapter, cmd_node);
                queue_work(adapter->workqueue, &adapter->main_work);
                return -1;
@@ -809,17 +807,6 @@ int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
        adapter->is_cmd_timedout = 0;
 
        resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
-       if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) {
-               mwifiex_dbg(adapter, ERROR,
-                           "CMD_RESP: %#x been canceled\n",
-                           le16_to_cpu(resp->command));
-               mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
-               spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
-               adapter->curr_cmd = NULL;
-               spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
-               return -1;
-       }
-
        if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
                /* Copy original response back to response buffer */
                struct mwifiex_ds_misc_cmd *hostcmd;
@@ -989,12 +976,13 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
 
                if (cmd_node->wait_q_enabled) {
                        adapter->cmd_wait_q.status = -ETIMEDOUT;
-                       wake_up_interruptible(&adapter->cmd_wait_q.wait);
                        mwifiex_cancel_pending_ioctl(adapter);
                }
        }
-       if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
+       if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
                mwifiex_init_fw_complete(adapter);
+               return;
+       }
 
        if (adapter->if_ops.device_dump)
                adapter->if_ops.device_dump(adapter);
@@ -1024,6 +1012,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
                adapter->curr_cmd->wait_q_enabled = false;
                adapter->cmd_wait_q.status = -1;
                mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+               /* no recycle here: probably still waiting for the response */
        }
        /* Cancel all pending command */
        spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
@@ -1032,11 +1021,8 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
                list_del(&cmd_node->list);
                spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
 
-               if (cmd_node->wait_q_enabled) {
+               if (cmd_node->wait_q_enabled)
                        adapter->cmd_wait_q.status = -1;
-                       mwifiex_complete_cmd(adapter, cmd_node);
-                       cmd_node->wait_q_enabled = false;
-               }
                mwifiex_recycle_cmd_node(adapter, cmd_node);
                spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
        }
@@ -1094,12 +1080,18 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
            (adapter->curr_cmd->wait_q_enabled)) {
                spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
                cmd_node = adapter->curr_cmd;
-               cmd_node->wait_q_enabled = false;
-               cmd_node->cmd_flag |= CMD_F_CANCELED;
-               mwifiex_recycle_cmd_node(adapter, cmd_node);
-               mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+               /* Setting curr_cmd to NULL is quite dangerous, because
+                * mwifiex_process_cmdresp() checks curr_cmd to be != NULL
+                * at the beginning and then relies on it and dereferences
+                * it at will. This probably works, since
+                * mwifiex_cmd_timeout_func() is the only caller of this
+                * function and no response is being processed at that
+                * point.
+                */
                adapter->curr_cmd = NULL;
                spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+
+               mwifiex_recycle_cmd_node(adapter, cmd_node);
        }
 
        /* Cancel all pending scan command */
@@ -1129,7 +1121,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
                        }
                }
        }
-       adapter->cmd_wait_q.status = -1;
 }
 
 /*
index 51e344789ba214bbbd5cfe7dc29a0a6ac1244527..098e1f14dc9a47efc4aae6e9611f1165c2af5624 100644 (file)
@@ -141,6 +141,9 @@ enum mwifiex_tdls_status {
        TDLS_SETUP_COMPLETE,
        TDLS_SETUP_FAILURE,
        TDLS_LINK_TEARDOWN,
+       TDLS_CHAN_SWITCHING,
+       TDLS_IN_BASE_CHAN,
+       TDLS_IN_OFF_CHAN,
 };
 
 enum mwifiex_tdls_error_code {
index cd09051710e6cee82c624e6960f4c8accd452338..3ec2ac82e394a158e6c346d75e73a29715e7ec29 100644 (file)
@@ -169,14 +169,17 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_TYPE_UAP_PS_AO_TIMER    (PROPRIETARY_TLV_BASE_ID + 123)
 #define TLV_TYPE_PWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 145)
 #define TLV_TYPE_GWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 146)
+#define TLV_TYPE_TX_PAUSE           (PROPRIETARY_TLV_BASE_ID + 148)
 #define TLV_TYPE_COALESCE_RULE      (PROPRIETARY_TLV_BASE_ID + 154)
 #define TLV_TYPE_KEY_PARAM_V2       (PROPRIETARY_TLV_BASE_ID + 156)
+#define TLV_TYPE_MULTI_CHAN_INFO    (PROPRIETARY_TLV_BASE_ID + 183)
 #define TLV_TYPE_TDLS_IDLE_TIMEOUT  (PROPRIETARY_TLV_BASE_ID + 194)
 #define TLV_TYPE_SCAN_CHANNEL_GAP   (PROPRIETARY_TLV_BASE_ID + 197)
 #define TLV_TYPE_API_REV            (PROPRIETARY_TLV_BASE_ID + 199)
 #define TLV_TYPE_CHANNEL_STATS      (PROPRIETARY_TLV_BASE_ID + 198)
 #define TLV_BTCOEX_WL_AGGR_WINSIZE  (PROPRIETARY_TLV_BASE_ID + 202)
 #define TLV_BTCOEX_WL_SCANTIME      (PROPRIETARY_TLV_BASE_ID + 203)
+#define TLV_TYPE_BSS_MODE           (PROPRIETARY_TLV_BASE_ID + 206)
 
 #define MWIFIEX_TX_DATA_BUF_SIZE_2K        2048
 
@@ -200,6 +203,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 
 #define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
 #define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
+#define ISSUPP_DRCS_ENABLED(FwCapInfo) (FwCapInfo & BIT(15))
 #define ISSUPP_SDIO_SPA_ENABLED(FwCapInfo) (FwCapInfo & BIT(16))
 
 #define MWIFIEX_DEF_HT_CAP     (IEEE80211_HT_CAP_DSSSCCK40 | \
@@ -359,6 +363,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HostCmd_CMD_MGMT_FRAME_REG                    0x010c
 #define HostCmd_CMD_REMAIN_ON_CHAN                    0x010d
 #define HostCmd_CMD_11AC_CFG                         0x0112
+#define HostCmd_CMD_TDLS_CONFIG                       0x0100
+#define HostCmd_CMD_MC_POLICY                         0x0121
 #define HostCmd_CMD_TDLS_OPER                         0x0122
 #define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG               0x0223
 
@@ -432,7 +438,6 @@ enum P2P_MODES {
 
 
 #define CMD_F_HOSTCMD           (1 << 0)
-#define CMD_F_CANCELED          (1 << 1)
 
 #define HostCmd_CMD_ID_MASK             0x0fff
 
@@ -509,8 +514,10 @@ enum P2P_MODES {
 #define EVENT_TDLS_GENERIC_EVENT        0x00000052
 #define EVENT_RADAR_DETECTED           0x00000053
 #define EVENT_CHANNEL_REPORT_RDY        0x00000054
+#define EVENT_TX_DATA_PAUSE             0x00000055
 #define EVENT_EXT_SCAN_REPORT           0x00000058
 #define EVENT_REMAIN_ON_CHAN_EXPIRED    0x0000005f
+#define EVENT_MULTI_CHAN_INFO           0x0000006a
 #define EVENT_TX_STATUS_REPORT         0x00000074
 #define EVENT_BT_COEX_WLAN_PARA_CHANGE 0X00000076
 
@@ -545,7 +552,27 @@ enum P2P_MODES {
 #define ACT_TDLS_DELETE            0x00
 #define ACT_TDLS_CREATE            0x01
 #define ACT_TDLS_CONFIG            0x02
-#define TDLS_EVENT_LINK_TEAR_DOWN  3
+
+#define TDLS_EVENT_LINK_TEAR_DOWN      3
+#define TDLS_EVENT_CHAN_SWITCH_RESULT  7
+#define TDLS_EVENT_START_CHAN_SWITCH   8
+#define TDLS_EVENT_CHAN_SWITCH_STOPPED 9
+
+#define TDLS_BASE_CHANNEL             0
+#define TDLS_OFF_CHANNEL              1
+
+#define ACT_TDLS_CS_ENABLE_CONFIG 0x00
+#define ACT_TDLS_CS_INIT         0x06
+#define ACT_TDLS_CS_STOP         0x07
+#define ACT_TDLS_CS_PARAMS       0x08
+
+#define MWIFIEX_DEF_CS_UNIT_TIME       2
+#define MWIFIEX_DEF_CS_THR_OTHERLINK   10
+#define MWIFIEX_DEF_THR_DIRECTLINK     0
+#define MWIFIEX_DEF_CS_TIME            10
+#define MWIFIEX_DEF_CS_TIMEOUT         16
+#define MWIFIEX_DEF_CS_REG_CLASS       12
+#define MWIFIEX_DEF_CS_PERIODICITY     1
 
 #define MWIFIEX_FW_V15            15
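
Note: the MWIFIEX_DEF_CS_* values above are channel-switch defaults consumed
by the new HostCmd_CMD_TDLS_CONFIG plumbing. A minimal sketch of how they
could be packed and sent (illustrative only; the driver's actual helper,
mwifiex_config_tdls_cs_params, is declared later in this patch and may
differ):

static int tdls_cs_params_sketch(struct mwifiex_private *priv)
{
	struct mwifiex_tdls_config_cs_params params;

	params.unit_time = MWIFIEX_DEF_CS_UNIT_TIME;
	params.thr_otherlink = MWIFIEX_DEF_CS_THR_OTHERLINK;
	params.thr_directlink = MWIFIEX_DEF_THR_DIRECTLINK;

	/* ACT_TDLS_CS_PARAMS is copied verbatim into the command body by
	 * mwifiex_cmd_tdls_config() (see the sta_cmd.c hunk below)
	 */
	return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
				ACT_TDLS_CS_PARAMS, 0, &params, true);
}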
 
@@ -658,6 +685,7 @@ struct mwifiex_fw_chan_stats {
 enum mwifiex_chan_scan_mode_bitmasks {
        MWIFIEX_PASSIVE_SCAN = BIT(0),
        MWIFIEX_DISABLE_CHAN_FILT = BIT(1),
+       MWIFIEX_HIDDEN_SSID_REPORT = BIT(4),
 };
 
 struct mwifiex_chan_scan_param_set {
@@ -1131,6 +1159,13 @@ struct host_cmd_ds_tx_rate_query {
        u8 ht_info;
 } __packed;
 
+struct mwifiex_tx_pause_tlv {
+       struct mwifiex_ie_types_header header;
+       u8 peermac[ETH_ALEN];
+       u8 tx_pause;
+       u8 pkt_cnt;
+} __packed;
+
 enum Host_Sleep_Action {
        HS_CONFIGURE = 0x0001,
        HS_ACTIVATE  = 0x0002,
@@ -1249,6 +1284,36 @@ struct host_cmd_ds_tdls_oper {
        u8 peer_mac[ETH_ALEN];
 } __packed;
 
+struct mwifiex_tdls_config {
+       __le16 enable;
+};
+
+struct mwifiex_tdls_config_cs_params {
+       u8 unit_time;
+       u8 thr_otherlink;
+       u8 thr_directlink;
+};
+
+struct mwifiex_tdls_init_cs_params {
+       u8 peer_mac[ETH_ALEN];
+       u8 primary_chan;
+       u8 second_chan_offset;
+       u8 band;
+       __le16 switch_time;
+       __le16 switch_timeout;
+       u8 reg_class;
+       u8 periodicity;
+} __packed;
+
+struct mwifiex_tdls_stop_cs_params {
+       u8 peer_mac[ETH_ALEN];
+};
+
+struct host_cmd_ds_tdls_config {
+       __le16 tdls_action;
+       u8 tdls_data[1];
+} __packed;
+
 struct mwifiex_chan_desc {
        __le16 start_freq;
        u8 chan_width;
@@ -1370,6 +1435,11 @@ struct host_cmd_ds_802_11_scan_ext {
        u8    tlv_buffer[1];
 } __packed;
 
+struct mwifiex_ie_types_bss_mode {
+       struct mwifiex_ie_types_header  header;
+       u8 bss_mode;
+} __packed;
+
 struct mwifiex_ie_types_bss_scan_rsp {
        struct mwifiex_ie_types_header header;
        u8 bssid[ETH_ALEN];
@@ -1908,6 +1978,12 @@ struct mwifiex_radar_det_event {
        __le32 passed;
 } __packed;
 
+struct mwifiex_ie_types_multi_chan_info {
+       struct mwifiex_ie_types_header header;
+       __le16 status;
+       u8 tlv_buffer[0];
+} __packed;
+
 struct meas_rpt_map {
        u8 rssi:3;
        u8 unmeasured:1;
@@ -1927,10 +2003,18 @@ struct host_cmd_ds_802_11_subsc_evt {
        __le16 events;
 } __packed;
 
+struct chan_switch_result {
+       u8 cur_chan;
+       u8 status;
+       u8 reason;
+} __packed;
+
 struct mwifiex_tdls_generic_event {
        __le16 type;
        u8 peer_mac[ETH_ALEN];
        union {
+               struct chan_switch_result switch_result;
+               u8 cs_stop_reason;
                __le16 reason_code;
                __le16 reserved;
        } u;
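
Note: the widened union lets the TDLS generic event report channel-switch
results. A hedged sketch of how a handler might decode the new members
(illustrative; the driver's real event handler may differ):

static void tdls_event_sketch(struct mwifiex_private *priv,
			      struct mwifiex_tdls_generic_event *evt)
{
	switch (le16_to_cpu(evt->type)) {
	case TDLS_EVENT_CHAN_SWITCH_RESULT:
		mwifiex_dbg(priv->adapter, EVENT,
			    "TDLS CS result: chan %d, status %d, reason %d\n",
			    evt->u.switch_result.cur_chan,
			    evt->u.switch_result.status,
			    evt->u.switch_result.reason);
		break;
	case TDLS_EVENT_CHAN_SWITCH_STOPPED:
		mwifiex_dbg(priv->adapter, EVENT,
			    "TDLS CS stopped, reason %d\n",
			    evt->u.cs_stop_reason);
		break;
	case TDLS_EVENT_LINK_TEAR_DOWN:
		/* reason_code is little-endian on the wire */
		mwifiex_dbg(priv->adapter, EVENT,
			    "TDLS teardown, reason %d\n",
			    le16_to_cpu(evt->u.reason_code));
		break;
	}
}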
@@ -1971,6 +2055,11 @@ struct host_cmd_ds_coalesce_cfg {
        struct coalesce_receive_filt_rule rule[0];
 } __packed;
 
+struct host_cmd_ds_multi_chan_policy {
+       __le16 action;
+       __le16 policy;
+} __packed;
+
 struct host_cmd_ds_command {
        __le16 command;
        __le16 size;
@@ -2035,9 +2124,11 @@ struct host_cmd_ds_command {
                struct host_cmd_ds_sta_list sta_list;
                struct host_cmd_11ac_vht_cfg vht_cfg;
                struct host_cmd_ds_coalesce_cfg coalesce_cfg;
+               struct host_cmd_ds_tdls_config tdls_config;
                struct host_cmd_ds_tdls_oper tdls_oper;
                struct host_cmd_ds_chan_rpt_req chan_rpt_req;
                struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg;
+               struct host_cmd_ds_multi_chan_policy mc_policy;
        } params;
 } __packed;
 
index 0ba8945094139c5e02374a11034e66777186c074..abf52d25b9815b720399221e2da00bff9381a2c9 100644 (file)
@@ -409,6 +409,8 @@ int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
        int ret;
 
        ret = mwifiex_uap_parse_tail_ies(priv, info);
+
+       if (ret)
                return ret;
 
        return mwifiex_set_mgmt_beacon_data_ies(priv, info);
@@ -477,6 +479,7 @@ int mwifiex_del_mgmt_ies(struct mwifiex_private *priv)
                                                   ar_ie, &priv->assocresp_idx);
 
 done:
+       kfree(gen_ie);
        kfree(beacon_ie);
        kfree(pr_ie);
        kfree(ar_ie);
index df7fdc09d38c7f1e326b59ad8fd0a9ea53485f74..5d3ae63baea4c1950203563f712bda2f83c04988 100644 (file)
@@ -77,7 +77,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
 
        priv->media_connected = false;
        eth_broadcast_addr(priv->curr_addr);
-
+       priv->port_open = false;
        priv->pkt_tx_ctrl = 0;
        priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
        priv->data_rate = 0;    /* Initially indicate the rate as auto */
@@ -301,7 +301,7 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
        adapter->iface_limit.sta_intf = MWIFIEX_MAX_STA_NUM;
        adapter->iface_limit.uap_intf = MWIFIEX_MAX_UAP_NUM;
        adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM;
-
+       adapter->active_scan_triggered = false;
        setup_timer(&adapter->wakeup_timer, wakeup_timer_fn,
                    (unsigned long)adapter);
 }
@@ -499,6 +499,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
                INIT_LIST_HEAD(&priv->sta_list);
                INIT_LIST_HEAD(&priv->auto_tdls_list);
                skb_queue_head_init(&priv->tdls_txq);
+               skb_queue_head_init(&priv->bypass_txq);
 
                spin_lock_init(&priv->tx_ba_stream_tbl_lock);
                spin_lock_init(&priv->rx_reorder_tbl_lock);
@@ -550,11 +551,6 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter)
                }
        }
 
-       if (adapter->if_ops.init_fw_port) {
-               if (adapter->if_ops.init_fw_port(adapter))
-                       return -1;
-       }
-
        for (i = 0; i < adapter->priv_num; i++) {
                if (adapter->priv[i]) {
                        ret = mwifiex_sta_init_cmd(adapter->priv[i], first_sta,
index 56b024a6aaa58d2b89ca25692c37163367eda597..3cda1f956f0b1654ec438291e22f416ada279e17 100644 (file)
@@ -783,6 +783,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
 
        if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
                priv->scan_block = true;
+       else
+               priv->port_open = true;
 
 done:
        /* Need to indicate IOCTL complete */
index 3ba4e0e04223bcde4fd0da162db6bb6db6160b62..278dc94eaecbb28e12a075526572c342e52f31cf 100644 (file)
@@ -276,6 +276,7 @@ process_start:
                     !adapter->pm_wakeup_fw_try) &&
                    (is_command_pending(adapter) ||
                     !skb_queue_empty(&adapter->tx_data_q) ||
+                    !mwifiex_bypass_txlist_empty(adapter) ||
                     !mwifiex_wmm_lists_empty(adapter))) {
                        adapter->pm_wakeup_fw_try = true;
                        mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3));
@@ -299,9 +300,16 @@ process_start:
 
                        if ((!adapter->scan_chan_gap_enabled &&
                             adapter->scan_processing) || adapter->data_sent ||
+                            mwifiex_is_tdls_chan_switching
+                            (mwifiex_get_priv(adapter,
+                                              MWIFIEX_BSS_ROLE_STA)) ||
                            (mwifiex_wmm_lists_empty(adapter) &&
+                            mwifiex_bypass_txlist_empty(adapter) &&
                             skb_queue_empty(&adapter->tx_data_q))) {
                                if (adapter->cmd_sent || adapter->curr_cmd ||
+                                       !mwifiex_is_send_cmd_allowed
+                                               (mwifiex_get_priv(adapter,
+                                               MWIFIEX_BSS_ROLE_STA)) ||
                                    (!is_command_pending(adapter)))
                                        break;
                        }
@@ -342,7 +350,9 @@ process_start:
                        continue;
                }
 
-               if (!adapter->cmd_sent && !adapter->curr_cmd) {
+               if (!adapter->cmd_sent && !adapter->curr_cmd &&
+                   mwifiex_is_send_cmd_allowed
+                   (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
                        if (mwifiex_exec_next_cmd(adapter) == -1) {
                                ret = -1;
                                break;
@@ -365,7 +375,25 @@ process_start:
 
                if ((adapter->scan_chan_gap_enabled ||
                     !adapter->scan_processing) &&
-                   !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter)) {
+                   !adapter->data_sent &&
+                   !mwifiex_bypass_txlist_empty(adapter) &&
+                   !mwifiex_is_tdls_chan_switching
+                       (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
+                       mwifiex_process_bypass_tx(adapter);
+                       if (adapter->hs_activated) {
+                               adapter->is_hs_configured = false;
+                               mwifiex_hs_activated_event
+                                       (mwifiex_get_priv
+                                        (adapter, MWIFIEX_BSS_ROLE_ANY),
+                                        false);
+                       }
+               }
+
+               if ((adapter->scan_chan_gap_enabled ||
+                    !adapter->scan_processing) &&
+                   !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter) &&
+                   !mwifiex_is_tdls_chan_switching
+                       (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
                        mwifiex_wmm_process_tx(adapter);
                        if (adapter->hs_activated) {
                                adapter->is_hs_configured = false;
@@ -379,6 +407,7 @@ process_start:
                if (adapter->delay_null_pkt && !adapter->cmd_sent &&
                    !adapter->curr_cmd && !is_command_pending(adapter) &&
                    (mwifiex_wmm_lists_empty(adapter) &&
+                    mwifiex_bypass_txlist_empty(adapter) &&
                     skb_queue_empty(&adapter->tx_data_q))) {
                        if (!mwifiex_send_null_packet
                            (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
@@ -649,6 +678,26 @@ mwifiex_close(struct net_device *dev)
        return 0;
 }
 
+static bool
+mwifiex_bypass_tx_queue(struct mwifiex_private *priv,
+                       struct sk_buff *skb)
+{
+       struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
+
+       if (ntohs(eth_hdr->h_proto) == ETH_P_PAE ||
+           mwifiex_is_skb_mgmt_frame(skb) ||
+           (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
+            ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+            (ntohs(eth_hdr->h_proto) == ETH_P_TDLS))) {
+               mwifiex_dbg(priv->adapter, DATA,
+                           "bypass txqueue; eth type %#x, mgmt %d\n",
+                            ntohs(eth_hdr->h_proto),
+                            mwifiex_is_skb_mgmt_frame(skb));
+               return true;
+       }
+
+       return false;
+}
 /*
  * Add buffer into wmm tx queue and queue work to transmit it.
  */
@@ -666,8 +715,14 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
                }
        }
 
-       atomic_inc(&priv->adapter->tx_pending);
-       mwifiex_wmm_add_buf_txqueue(priv, skb);
+       if (mwifiex_bypass_tx_queue(priv, skb)) {
+               atomic_inc(&priv->adapter->tx_pending);
+               atomic_inc(&priv->adapter->bypass_tx_pending);
+               mwifiex_wmm_add_buf_bypass_txqueue(priv, skb);
+       } else {
+               atomic_inc(&priv->adapter->tx_pending);
+               mwifiex_wmm_add_buf_txqueue(priv, skb);
+       }
 
        mwifiex_queue_main_work(priv->adapter);
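
Note: mwifiex_bypass_txlist_empty() and mwifiex_wmm_add_buf_bypass_txqueue()
are referenced here but live with the WMM queue helpers outside this hunk.
Given the bypass_tx_pending counter added below, the emptiness check is
plausibly just (an assumption, not the verbatim implementation):

static inline bool
mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter)
{
	return !atomic_read(&adapter->bypass_tx_pending);
}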
 
index ae98b5b83b1f0133cc216c2942095081ded5b52d..6b9512140e7aa6b21d36e55d128486cf29210e74 100644 (file)
@@ -281,6 +281,7 @@ struct mwifiex_ra_list_tbl {
        u8 amsdu_in_ampdu;
        u16 total_pkt_count;
        bool tdls_link;
+       bool tx_paused;
 };
 
 struct mwifiex_tid_tbl {
@@ -294,6 +295,7 @@ struct mwifiex_tid_tbl {
 struct mwifiex_wmm_desc {
        struct mwifiex_tid_tbl tid_tbl_ptr[MAX_NUM_TID];
        u32 packets_out[MAX_NUM_TID];
+       u32 pkts_paused[MAX_NUM_TID];
        /* spin lock to protect ra_list */
        spinlock_t ra_list_spinlock;
        struct mwifiex_wmm_ac_status ac_status[IEEE80211_NUM_ACS];
@@ -517,6 +519,7 @@ struct mwifiex_private {
        u8 frame_type;
        u8 curr_addr[ETH_ALEN];
        u8 media_connected;
+       u8 port_open;
        u32 num_tx_timeout;
        /* track consecutive timeout */
        u8 tx_timeout_cnt;
@@ -662,6 +665,8 @@ struct mwifiex_private {
        struct cfg80211_beacon_data beacon_after;
        struct mwifiex_11h_intf_state state_11h;
        struct mwifiex_ds_mem_rw mem_rw;
+       struct sk_buff_head bypass_txq;
+       struct mwifiex_user_scan_chan hidden_chan[MWIFIEX_USER_SCAN_CHAN_MAX];
 };
 
 
@@ -768,6 +773,7 @@ struct mwifiex_sta_node {
        u8 tdls_status;
        struct mwifiex_tdls_capab tdls_cap;
        struct mwifiex_station_stats stats;
+       u8 tx_pause;
 };
 
 struct mwifiex_auto_tdls_peer {
@@ -831,6 +837,7 @@ struct mwifiex_adapter {
        wait_queue_head_t init_wait_q;
        void *card;
        struct mwifiex_if_ops if_ops;
+       atomic_t bypass_tx_pending;
        atomic_t rx_pending;
        atomic_t tx_pending;
        atomic_t cmd_pending;
@@ -979,6 +986,8 @@ struct mwifiex_adapter {
        u8 coex_win_size;
        u8 coex_tx_win_size;
        u8 coex_rx_win_size;
+       bool drcs_enabled;
+       u8 active_scan_triggered;
 };
 
 void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
@@ -1330,6 +1339,21 @@ static inline u8 mwifiex_is_any_intf_active(struct mwifiex_private *priv)
        return 0;
 }
 
+static inline u8 mwifiex_is_tdls_link_setup(u8 status)
+{
+       switch (status) {
+       case TDLS_SETUP_COMPLETE:
+       case TDLS_CHAN_SWITCHING:
+       case TDLS_IN_BASE_CHAN:
+       case TDLS_IN_OFF_CHAN:
+               return true;
+       default:
+               break;
+       }
+
+       return false;
+}
+
 int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
                             u32 func_init_shutdown);
 int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8);
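
Note: a short usage sketch for the new helper (hypothetical call site; the
names come from declarations elsewhere in this patch): data may use the TDLS
direct link only while the link is in one of the set-up states and the peer
has not paused TX.

static bool sta_can_use_direct_link(struct mwifiex_private *priv,
				    const u8 *peer_mac)
{
	struct mwifiex_sta_node *node = mwifiex_get_sta_entry(priv, peer_mac);

	return node && mwifiex_is_tdls_link_setup(node->tdls_status) &&
	       !node->tx_pause;
}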
@@ -1458,6 +1482,9 @@ struct mwifiex_sta_node *
 mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac);
 struct mwifiex_sta_node *
 mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac);
+u8 mwifiex_is_tdls_chan_switching(struct mwifiex_private *priv);
+u8 mwifiex_is_tdls_off_chan(struct mwifiex_private *priv);
+u8 mwifiex_is_send_cmd_allowed(struct mwifiex_private *priv);
 int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
                                 u8 action_code, u8 dialog_token,
                                 u16 status_code, const u8 *extra_ies,
@@ -1488,6 +1515,13 @@ void mwifiex_check_auto_tdls(unsigned long context);
 void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac);
 void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv);
 void mwifiex_clean_auto_tdls(struct mwifiex_private *priv);
+int mwifiex_config_tdls_enable(struct mwifiex_private *priv);
+int mwifiex_config_tdls_disable(struct mwifiex_private *priv);
+int mwifiex_config_tdls_cs_params(struct mwifiex_private *priv);
+int mwifiex_stop_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac);
+int mwifiex_start_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac,
+                         u8 primary_chan, u8 second_chan_offset, u8 band);
+
 int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv,
                                          struct host_cmd_ds_command *cmd,
                                          void *data_buf);
@@ -1522,6 +1556,12 @@ void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags);
 void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
 void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter);
 void mwifiex_11n_delba(struct mwifiex_private *priv, int tid);
+int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy);
+void mwifiex_process_tx_pause_event(struct mwifiex_private *priv,
+                                   struct sk_buff *event);
+void mwifiex_process_multi_chan_event(struct mwifiex_private *priv,
+                                     struct sk_buff *event_skb);
+
 #ifdef CONFIG_DEBUG_FS
 void mwifiex_debugfs_init(void);
 void mwifiex_debugfs_remove(void);
index 77b9055a2d147411515b5875f67b90210938ac9b..408b6846071655cbd87385fcbb1eb1f41b0fc181 100644 (file)
@@ -266,12 +266,17 @@ static const struct pci_device_id mwifiex_ids[] = {
        {
                PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               .driver_data = (unsigned long) &mwifiex_pcie8766,
+               .driver_data = (unsigned long)&mwifiex_pcie8766,
        },
        {
                PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8897,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-               .driver_data = (unsigned long) &mwifiex_pcie8897,
+               .driver_data = (unsigned long)&mwifiex_pcie8897,
+       },
+       {
+               PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8997,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               .driver_data = (unsigned long)&mwifiex_pcie8997,
        },
        {},
 };
@@ -1082,6 +1087,7 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
                        card->txbd_rdptr++;
                        break;
                case PCIE_DEVICE_ID_MARVELL_88W8897:
+               case PCIE_DEVICE_ID_MARVELL_88W8997:
                        card->txbd_rdptr += reg->ring_tx_start_ptr;
                        break;
                }
@@ -1179,6 +1185,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
                        card->txbd_wrptr++;
                        break;
                case PCIE_DEVICE_ID_MARVELL_88W8897:
+               case PCIE_DEVICE_ID_MARVELL_88W8997:
                        card->txbd_wrptr += reg->ring_tx_start_ptr;
                        break;
                }
@@ -1807,6 +1814,8 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
 
        if (!card->evt_buf_list[rdptr]) {
                skb_push(skb, INTF_HEADER_LEN);
+               skb_put(skb, MAX_EVENT_SIZE - skb->len);
+               memset(skb->data, 0, MAX_EVENT_SIZE);
                if (mwifiex_map_pci_memory(adapter, skb,
                                           MAX_EVENT_SIZE,
                                           PCI_DMA_FROMDEVICE))
@@ -2731,3 +2740,4 @@ MODULE_VERSION(PCIE_VERSION);
 MODULE_LICENSE("GPL v2");
 MODULE_FIRMWARE(PCIE8766_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(PCIE8897_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(PCIE8997_DEFAULT_FW_NAME);
index 0e7ee8b72358f7feba632f43349113a6e662b210..48e549c3b285b362b2a7ffe97b0106df04c10eb7 100644 (file)
 
 #define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin"
 #define PCIE8897_DEFAULT_FW_NAME "mrvl/pcie8897_uapsta.bin"
+#define PCIE8997_DEFAULT_FW_NAME "mrvl/pcie8997_uapsta.bin"
 
 #define PCIE_VENDOR_ID_MARVELL              (0x11ab)
 #define PCIE_DEVICE_ID_MARVELL_88W8766P                (0x2b30)
 #define PCIE_DEVICE_ID_MARVELL_88W8897         (0x2b38)
+#define PCIE_DEVICE_ID_MARVELL_88W8997         (0x2b42)
 
 /* Constants for Buffer Descriptor (BD) rings */
 #define MWIFIEX_MAX_TXRX_BD                    0x20
@@ -197,7 +199,38 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = {
        .sleep_cookie = 0,
        .fw_dump_ctrl = 0xcf4,
        .fw_dump_start = 0xcf8,
-       .fw_dump_end = 0xcff
+       .fw_dump_end = 0xcff,
+};
+
+static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = {
+       .cmd_addr_lo = PCIE_SCRATCH_0_REG,
+       .cmd_addr_hi = PCIE_SCRATCH_1_REG,
+       .cmd_size = PCIE_SCRATCH_2_REG,
+       .fw_status = PCIE_SCRATCH_3_REG,
+       .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
+       .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
+       .tx_rdptr = 0xC1A4,
+       .tx_wrptr = 0xC1A8,
+       .rx_rdptr = 0xC1A8,
+       .rx_wrptr = 0xC1A4,
+       .evt_rdptr = PCIE_SCRATCH_10_REG,
+       .evt_wrptr = PCIE_SCRATCH_11_REG,
+       .drv_rdy = PCIE_SCRATCH_12_REG,
+       .tx_start_ptr = 16,
+       .tx_mask = 0x0FFF0000,
+       .tx_wrap_mask = 0x01FF0000,
+       .rx_mask = 0x00000FFF,
+       .rx_wrap_mask = 0x000001FF,
+       .tx_rollover_ind = BIT(28),
+       .rx_rollover_ind = BIT(12),
+       .evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND,
+       .ring_flag_sop = MWIFIEX_BD_FLAG_SOP,
+       .ring_flag_eop = MWIFIEX_BD_FLAG_EOP,
+       .ring_flag_xs_sop = MWIFIEX_BD_FLAG_XS_SOP,
+       .ring_flag_xs_eop = MWIFIEX_BD_FLAG_XS_EOP,
+       .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
+       .pfu_enabled = 1,
+       .sleep_cookie = 0,
 };
 
 struct mwifiex_pcie_device {
@@ -227,6 +260,15 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
        .can_ext_scan = true,
 };
 
+static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
+       .firmware       = PCIE8997_DEFAULT_FW_NAME,
+       .reg            = &mwifiex_reg_8997,
+       .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+       .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+       .can_dump_fw = false,
+       .can_ext_scan = true,
+};
+
 struct mwifiex_evt_buf_desc {
        u64 paddr;
        u16 len;
@@ -325,6 +367,7 @@ mwifiex_pcie_txbd_not_full(struct pcie_service_card *card)
                        return 1;
                break;
        case PCIE_DEVICE_ID_MARVELL_88W8897:
+       case PCIE_DEVICE_ID_MARVELL_88W8997:
                if (((card->txbd_wrptr & reg->tx_mask) !=
                     (card->txbd_rdptr & reg->tx_mask)) ||
                    ((card->txbd_wrptr & reg->tx_rollover_ind) ==
index baf9715ddc1034bc58e6ec4cea367caa670b571c..5847863a2d6bec573956ff88c42a16c3495333b3 100644 (file)
@@ -527,7 +527,8 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
 
                        if (ch->flags & IEEE80211_CHAN_NO_IR)
                                scan_chan_list[chan_idx].chan_scan_mode_bitmap
-                                       |= MWIFIEX_PASSIVE_SCAN;
+                                       |= (MWIFIEX_PASSIVE_SCAN |
+                                           MWIFIEX_HIDDEN_SSID_REPORT);
                        else
                                scan_chan_list[chan_idx].chan_scan_mode_bitmap
                                        &= ~MWIFIEX_PASSIVE_SCAN;
@@ -823,6 +824,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
        int i;
        u8 ssid_filter;
        struct mwifiex_ie_types_htcap *ht_cap;
+       struct mwifiex_ie_types_bss_mode *bss_mode;
 
        /* The tlv_buf_len is calculated for each scan command.  The TLVs added
           in this routine will be preserved since the routine that sends the
@@ -908,6 +910,10 @@ mwifiex_config_scan(struct mwifiex_private *priv,
                                wildcard_ssid_tlv->max_ssid_length =
                                                        IEEE80211_MAX_SSID_LEN;
 
+                       if (!memcmp(user_scan_in->ssid_list[i].ssid,
+                                   "DIRECT-", 7))
+                               wildcard_ssid_tlv->max_ssid_length = 0xfe;
+
                        memcpy(wildcard_ssid_tlv->ssid,
                               user_scan_in->ssid_list[i].ssid, ssid_len);
 
@@ -968,6 +974,15 @@ mwifiex_config_scan(struct mwifiex_private *priv,
        else
                *max_chan_per_scan = MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD;
 
+       if (adapter->ext_scan) {
+               bss_mode = (struct mwifiex_ie_types_bss_mode *)tlv_pos;
+               bss_mode->header.type = cpu_to_le16(TLV_TYPE_BSS_MODE);
+               bss_mode->header.len = cpu_to_le16(sizeof(bss_mode->bss_mode));
+               bss_mode->bss_mode = scan_cfg_out->bss_mode;
+               tlv_pos += sizeof(bss_mode->header) +
+                          le16_to_cpu(bss_mode->header.len);
+       }
+
        /* If the input config or adapter has the number of Probes set,
           add tlv */
        if (num_probes) {
@@ -1035,7 +1050,8 @@ mwifiex_config_scan(struct mwifiex_private *priv,
                        if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
                                (scan_chan_list +
                                 chan_idx)->chan_scan_mode_bitmap
-                                       |= MWIFIEX_PASSIVE_SCAN;
+                                       |= (MWIFIEX_PASSIVE_SCAN |
+                                           MWIFIEX_HIDDEN_SSID_REPORT);
                        else
                                (scan_chan_list +
                                 chan_idx)->chan_scan_mode_bitmap
@@ -1586,6 +1602,62 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
        return ret;
 }
 
+/* This function checks whether the SSID string is all zeroes or zero length */
+static bool mwifiex_is_hidden_ssid(struct cfg80211_ssid *ssid)
+{
+       int idx;
+
+       for (idx = 0; idx < ssid->ssid_len; idx++) {
+               if (ssid->ssid[idx])
+                       return false;
+       }
+
+       return true;
+}
+
+/* This function checks whether any hidden SSID was found on passive scan
+ * channels and saves those channels for a follow-up specific-SSID active scan
+ */
+static int mwifiex_save_hidden_ssid_channels(struct mwifiex_private *priv,
+                                            struct cfg80211_bss *bss)
+{
+       struct mwifiex_bssdescriptor *bss_desc;
+       int ret;
+       int chid;
+
+       /* Allocate and fill new bss descriptor */
+       bss_desc = kzalloc(sizeof(*bss_desc), GFP_KERNEL);
+       if (!bss_desc)
+               return -ENOMEM;
+
+       ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc);
+       if (ret)
+               goto done;
+
+       if (mwifiex_is_hidden_ssid(&bss_desc->ssid)) {
+               mwifiex_dbg(priv->adapter, INFO, "found hidden SSID\n");
+               for (chid = 0 ; chid < MWIFIEX_USER_SCAN_CHAN_MAX; chid++) {
+                       if (priv->hidden_chan[chid].chan_number ==
+                           bss->channel->hw_value)
+                               break;
+
+                       if (!priv->hidden_chan[chid].chan_number) {
+                               priv->hidden_chan[chid].chan_number =
+                                       bss->channel->hw_value;
+                               priv->hidden_chan[chid].radio_type =
+                                       bss->channel->band;
+                               priv->hidden_chan[chid].scan_type =
+                                       MWIFIEX_SCAN_TYPE_ACTIVE;
+                               break;
+                       }
+               }
+       }
+
+done:
+       kfree(bss_desc);
+       return 0;
+}
+
 static int mwifiex_update_curr_bss_params(struct mwifiex_private *priv,
                                          struct cfg80211_bss *bss)
 {
@@ -1775,6 +1847,14 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
                                    .mac_address, ETH_ALEN))
                                mwifiex_update_curr_bss_params(priv, bss);
                        cfg80211_put_bss(priv->wdev.wiphy, bss);
+
+                       if ((chan->flags & IEEE80211_CHAN_RADAR) ||
+                           (chan->flags & IEEE80211_CHAN_NO_IR)) {
+                               mwifiex_dbg(adapter, INFO,
+                                           "radar or passive channel %d\n",
+                                           channel);
+                               mwifiex_save_hidden_ssid_channels(priv, bss);
+                       }
                }
        } else {
                mwifiex_dbg(adapter, WARN, "missing BSS channel IE\n");
@@ -1798,6 +1878,57 @@ static void mwifiex_complete_scan(struct mwifiex_private *priv)
        }
 }
 
+/* This function checks whether any hidden SSID was found on passive scan
+ * channels and runs a specific-SSID active scan on those channels
+ */
+static int
+mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv)
+{
+       int ret;
+       struct mwifiex_adapter *adapter = priv->adapter;
+       u8 id = 0;
+       struct mwifiex_user_scan_cfg  *user_scan_cfg;
+
+       if (adapter->active_scan_triggered) {
+               adapter->active_scan_triggered = false;
+               return 0;
+       }
+
+       if (!priv->hidden_chan[0].chan_number) {
+               mwifiex_dbg(adapter, INFO, "No BSS with hidden SSID found on DFS channels\n");
+               return 0;
+       }
+       user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL);
+
+       if (!user_scan_cfg)
+               return -ENOMEM;
+
+       memset(user_scan_cfg, 0, sizeof(*user_scan_cfg));
+
+       for (id = 0; id < MWIFIEX_USER_SCAN_CHAN_MAX; id++) {
+               if (!priv->hidden_chan[id].chan_number)
+                       break;
+               memcpy(&user_scan_cfg->chan_list[id],
+                      &priv->hidden_chan[id],
+                      sizeof(struct mwifiex_user_scan_chan));
+       }
+
+       adapter->active_scan_triggered = true;
+       user_scan_cfg->num_ssids = priv->scan_request->n_ssids;
+       user_scan_cfg->ssid_list = priv->scan_request->ssids;
+
+       ret = mwifiex_scan_networks(priv, user_scan_cfg);
+       kfree(user_scan_cfg);
+
+       memset(&priv->hidden_chan, 0, sizeof(priv->hidden_chan));
+
+       if (ret) {
+               dev_err(priv->adapter->dev, "scan failed: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
 static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
 {
        struct mwifiex_adapter *adapter = priv->adapter;
@@ -1811,6 +1942,8 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
                adapter->scan_processing = false;
                spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
 
+               mwifiex_active_scan_req_for_passive_chan(priv);
+
                if (!adapter->ext_scan)
                        mwifiex_complete_scan(priv);
 
@@ -1837,15 +1970,17 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
                adapter->scan_processing = false;
                spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
 
-               if (priv->scan_request) {
-                       mwifiex_dbg(adapter, INFO,
-                                   "info: aborting scan\n");
-                       cfg80211_scan_done(priv->scan_request, 1);
-                       priv->scan_request = NULL;
-               } else {
-                       priv->scan_aborting = false;
-                       mwifiex_dbg(adapter, INFO,
-                                   "info: scan already aborted\n");
+               if (!adapter->active_scan_triggered) {
+                       if (priv->scan_request) {
+                               mwifiex_dbg(adapter, INFO,
+                                           "info: aborting scan\n");
+                               cfg80211_scan_done(priv->scan_request, 1);
+                               priv->scan_request = NULL;
+                       } else {
+                               priv->scan_aborting = false;
+                               mwifiex_dbg(adapter, INFO,
+                                           "info: scan already aborted\n");
+                       }
                }
        } else {
                /* Get scan command from scan_pending_q and put to
index a0b121f3460c871eefca6fd30d79a3217ef401c8..5d05c6fe642985cd377eefec707d75112f0240ea 100644 (file)
@@ -51,6 +51,10 @@ static unsigned long iface_work_flags;
 
 static struct semaphore add_remove_card_sem;
 
+static struct memory_type_mapping generic_mem_type_map[] = {
+       {"DUMP", NULL, 0, 0xDD},
+};
+
 static struct memory_type_mapping mem_type_mapping_tbl[] = {
        {"ITCM", NULL, 0, 0xF0},
        {"DTCM", NULL, 0, 0xF1},
@@ -91,6 +95,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
                return -ENOMEM;
 
        card->func = func;
+       card->device_id = id;
 
        func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
 
@@ -107,6 +112,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
                card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size;
                card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
                card->can_dump_fw = data->can_dump_fw;
+               card->fw_dump_enh = data->fw_dump_enh;
                card->can_auto_tdls = data->can_auto_tdls;
                card->can_ext_scan = data->can_ext_scan;
        }
@@ -287,6 +293,8 @@ static int mwifiex_sdio_suspend(struct device *dev)
 #define SDIO_DEVICE_ID_MARVELL_8887   (0x9135)
 /* Device ID for SD8801 */
 #define SDIO_DEVICE_ID_MARVELL_8801   (0x9139)
+/* Device ID for SD8997 */
+#define SDIO_DEVICE_ID_MARVELL_8997   (0x9141)
 
 
 /* WLAN IDs */
@@ -303,6 +311,8 @@ static const struct sdio_device_id mwifiex_ids[] = {
                .driver_data = (unsigned long)&mwifiex_sdio_sd8887},
        {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8801),
                .driver_data = (unsigned long)&mwifiex_sdio_sd8801},
+       {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997),
+               .driver_data = (unsigned long)&mwifiex_sdio_sd8997},
        {},
 };
 
@@ -910,6 +920,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
        if (!fwbuf)
                return -ENOMEM;
 
+       sdio_claim_host(card->func);
+
        /* Perform firmware data transfer */
        do {
                /* The host polls for the DN_LD_CARD_RDY and CARD_IO_READY
@@ -1014,6 +1026,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
                offset += txlen;
        } while (true);
 
+       sdio_release_host(card->func);
+
        mwifiex_dbg(adapter, MSG,
                    "info: FW download over, size %d bytes\n", offset);
 
@@ -1964,8 +1978,13 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
        adapter->dev = &func->dev;
 
        strcpy(adapter->fw_name, card->firmware);
-       adapter->mem_type_mapping_tbl = mem_type_mapping_tbl;
-       adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
+       if (card->fw_dump_enh) {
+               adapter->mem_type_mapping_tbl = generic_mem_type_map;
+               adapter->num_mem_types = 1;
+       } else {
+               adapter->mem_type_mapping_tbl = mem_type_mapping_tbl;
+               adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
+       }
 
        return 0;
 }
@@ -2107,26 +2126,46 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
                    port, card->mp_data_port_mask);
 }
 
+static void mwifiex_recreate_adapter(struct sdio_mmc_card *card)
+{
+       struct sdio_func *func = card->func;
+       const struct sdio_device_id *device_id = card->device_id;
+
+       /* TODO mmc_hw_reset does not require destroying and re-probing the
+        * whole adapter. Hence there was no need for this Rube Goldberg
+        * design to reload the fw from an external workqueue. If we don't
+        * destroy the adapter we could reload the fw from
+        * mwifiex_main_work_queue directly.
+        * The real difficulty with fw reset is to restore all the user
+        * settings applied through ioctl. By destroying and recreating the
+        * adapter, we take the easy way out, since we rely on user space to
+        * restore them. We assume that user space will treat the new
+        * incarnation of the adapter (interfaces) as if it had just been
+        * discovered and will initialize it from scratch.
+        */
+
+       mwifiex_sdio_remove(func);
+
+       /* power cycle the adapter */
+       sdio_claim_host(func);
+       mmc_hw_reset(func->card->host);
+       sdio_release_host(func);
+
+       mwifiex_sdio_probe(func, device_id);
+}
+
 static struct mwifiex_adapter *save_adapter;
 static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
 {
        struct sdio_mmc_card *card = adapter->card;
-       struct mmc_host *target = card->func->card->host;
-
-       /* The actual reset operation must be run outside of driver thread.
-        * This is because mmc_remove_host() will cause the device to be
-        * instantly destroyed, and the driver then needs to end its thread,
-        * leading to a deadlock.
-        *
-        * We run it in a totally independent workqueue.
-        */
 
-       mwifiex_dbg(adapter, WARN, "Resetting card...\n");
-       mmc_remove_host(target);
-       /* 200ms delay is based on experiment with sdhci controller */
-       mdelay(200);
-       target->rescan_entered = 0; /* rescan non-removable cards */
-       mmc_add_host(target);
+       /* TODO: the card pointer is unprotected. If the adapter is removed
+        * physically, the sdio core might trigger mwifiex_sdio_remove before
+        * this workqueue is run, which will destroy the adapter struct. When
+        * this workqueue eventually executes, it will dereference an invalid
+        * adapter pointer.
+        */
+       mwifiex_recreate_adapter(card);
 }
 
 /* This function reads/writes firmware */
@@ -2138,8 +2177,8 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
        int ret, tries;
        u8 ctrl_data = 0;
 
-       sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl,
-                   &ret);
+       sdio_writeb(card->func, card->reg->fw_dump_host_ready,
+                   card->reg->fw_dump_ctrl, &ret);
        if (ret) {
                mwifiex_dbg(adapter, ERROR, "SDIO Write ERR\n");
                return RDWR_STATUS_FAILURE;
@@ -2155,10 +2194,10 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
                        break;
                if (doneflag && ctrl_data == doneflag)
                        return RDWR_STATUS_DONE;
-               if (ctrl_data != FW_DUMP_HOST_READY) {
+               if (ctrl_data != card->reg->fw_dump_host_ready) {
                        mwifiex_dbg(adapter, WARN,
-                                   "The ctrl reg was changed, re-try again!\n");
-                       sdio_writeb(card->func, FW_DUMP_HOST_READY,
+                                   "The ctrl reg was changed, re-try again\n");
+                       sdio_writeb(card->func, card->reg->fw_dump_host_ready,
                                    card->reg->fw_dump_ctrl, &ret);
                        if (ret) {
                                mwifiex_dbg(adapter, ERROR, "SDIO write err\n");
@@ -2167,7 +2206,7 @@ rdwr_status mwifiex_sdio_rdwr_firmware(struct mwifiex_adapter *adapter,
                }
                usleep_range(100, 200);
        }
-       if (ctrl_data == FW_DUMP_HOST_READY) {
+       if (ctrl_data == card->reg->fw_dump_host_ready) {
                mwifiex_dbg(adapter, ERROR,
                            "Fail to pull ctrl_data\n");
                return RDWR_STATUS_FAILURE;
@@ -2300,10 +2339,129 @@ done:
        sdio_release_host(card->func);
 }
 
+static void mwifiex_sdio_generic_fw_dump(struct mwifiex_adapter *adapter)
+{
+       struct sdio_mmc_card *card = adapter->card;
+       struct memory_type_mapping *entry = &generic_mem_type_map[0];
+       unsigned int reg, reg_start, reg_end;
+       u8 start_flag = 0, done_flag = 0;
+       u8 *dbg_ptr, *end_ptr;
+       enum rdwr_status stat;
+       int ret = -1, tries;
+
+       if (!card->fw_dump_enh)
+               return;
+
+       if (entry->mem_ptr) {
+               vfree(entry->mem_ptr);
+               entry->mem_ptr = NULL;
+       }
+       entry->mem_size = 0;
+
+       mwifiex_pm_wakeup_card(adapter);
+       sdio_claim_host(card->func);
+
+       mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump start ==\n");
+
+       stat = mwifiex_sdio_rdwr_firmware(adapter, done_flag);
+       if (stat == RDWR_STATUS_FAILURE)
+               goto done;
+
+       reg_start = card->reg->fw_dump_start;
+       reg_end = card->reg->fw_dump_end;
+       for (reg = reg_start; reg <= reg_end; reg++) {
+               for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
+                       start_flag = sdio_readb(card->func, reg, &ret);
+                       if (ret) {
+                               mwifiex_dbg(adapter, ERROR,
+                                           "SDIO read err\n");
+                               goto done;
+                       }
+                       if (start_flag == 0)
+                               break;
+                       if (tries == MAX_POLL_TRIES) {
+                               mwifiex_dbg(adapter, ERROR,
+                                           "FW not ready to dump\n");
+                               ret = -1;
+                               goto done;
+                       }
+               }
+               usleep_range(100, 200);
+       }
+
+       entry->mem_ptr = vmalloc(0xf0000 + 1);
+       if (!entry->mem_ptr) {
+               ret = -1;
+               goto done;
+       }
+       dbg_ptr = entry->mem_ptr;
+       entry->mem_size = 0xf0000;
+       end_ptr = dbg_ptr + entry->mem_size;
+
+       done_flag = entry->done_flag;
+       mwifiex_dbg(adapter, DUMP,
+                   "Start %s output, please wait...\n", entry->mem_name);
+
+       while (true) {
+               stat = mwifiex_sdio_rdwr_firmware(adapter, done_flag);
+               if (stat == RDWR_STATUS_FAILURE)
+                       goto done;
+               for (reg = reg_start; reg <= reg_end; reg++) {
+                       *dbg_ptr = sdio_readb(card->func, reg, &ret);
+                       if (ret) {
+                               mwifiex_dbg(adapter, ERROR,
+                                           "SDIO read err\n");
+                               goto done;
+                       }
+                       dbg_ptr++;
+                       if (dbg_ptr >= end_ptr) {
+                               u8 *tmp_ptr;
+
+                               tmp_ptr = vmalloc(entry->mem_size + 0x4000 + 1);
+                               if (!tmp_ptr)
+                                       goto done;
+
+                               memcpy(tmp_ptr, entry->mem_ptr,
+                                      entry->mem_size);
+                               vfree(entry->mem_ptr);
+                               entry->mem_ptr = tmp_ptr;
+                               tmp_ptr = NULL;
+                               dbg_ptr = entry->mem_ptr + entry->mem_size;
+                               entry->mem_size += 0x4000;
+                               end_ptr = entry->mem_ptr + entry->mem_size;
+                       }
+               }
+               if (stat == RDWR_STATUS_DONE) {
+                       entry->mem_size = dbg_ptr - entry->mem_ptr;
+                       mwifiex_dbg(adapter, DUMP, "dump %s done size=0x%x\n",
+                                   entry->mem_name, entry->mem_size);
+                       ret = 0;
+                       break;
+               }
+       }
+       mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n");
+
+done:
+       if (ret) {
+               mwifiex_dbg(adapter, ERROR, "firmware dump failed\n");
+               if (entry->mem_ptr) {
+                       vfree(entry->mem_ptr);
+                       entry->mem_ptr = NULL;
+               }
+               entry->mem_size = 0;
+       }
+       sdio_release_host(card->func);
+}
+
 static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter)
 {
+       struct sdio_mmc_card *card = adapter->card;
+
        mwifiex_drv_info_dump(adapter);
-       mwifiex_sdio_fw_dump(adapter);
+       if (card->fw_dump_enh)
+               mwifiex_sdio_generic_fw_dump(adapter);
+       else
+               mwifiex_sdio_fw_dump(adapter);
        mwifiex_upload_device_dump(adapter);
 }
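
Note: the generic dump loop above starts with a 0xf0000-byte vmalloc()
buffer and, whenever the write pointer hits the end, reallocates with an
extra 0x4000 bytes. The growth pattern, distilled into a standalone helper
(illustrative only, not driver code):

static u8 *grow_dump_buf(u8 *buf, u32 *size, u32 step)
{
	u8 *tmp = vmalloc(*size + step + 1);

	if (!tmp)
		return NULL;

	memcpy(tmp, buf, *size);
	vfree(buf);
	*size += step;
	return tmp;
}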
 
@@ -2510,3 +2668,4 @@ MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8997_DEFAULT_FW_NAME);
index 6f645cf47369baddaa10bc6489837fa914a14a57..b9fbc5cf6262d8647d6064ddde51211acf749f72 100644 (file)
@@ -35,6 +35,7 @@
 #define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin"
 #define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin"
 #define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin"
+#define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin"
 
 #define BLOCK_MODE     1
 #define BYTE_MODE      0
@@ -222,6 +223,7 @@ struct mwifiex_sdio_card_reg {
        u8 cmd_cfg_1;
        u8 cmd_cfg_2;
        u8 cmd_cfg_3;
+       u8 fw_dump_host_ready;
        u8 fw_dump_ctrl;
        u8 fw_dump_start;
        u8 fw_dump_end;
@@ -257,11 +259,15 @@ struct sdio_mmc_card {
        bool supports_sdio_new_mode;
        bool has_control_mask;
        bool can_dump_fw;
+       bool fw_dump_enh;
        bool can_auto_tdls;
        bool can_ext_scan;
 
        struct mwifiex_sdio_mpa_tx mpa_tx;
        struct mwifiex_sdio_mpa_rx mpa_rx;
+
+       /* needed for card reset */
+       const struct sdio_device_id *device_id;
 };
 
 struct mwifiex_sdio_device {
@@ -275,6 +281,7 @@ struct mwifiex_sdio_device {
        bool supports_sdio_new_mode;
        bool has_control_mask;
        bool can_dump_fw;
+       bool fw_dump_enh;
        bool can_auto_tdls;
        bool can_ext_scan;
 };
@@ -350,6 +357,7 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
        .cmd_cfg_1 = 0xb9,
        .cmd_cfg_2 = 0xba,
        .cmd_cfg_3 = 0xbb,
+       .fw_dump_host_ready = 0xee,
        .fw_dump_ctrl = 0xe2,
        .fw_dump_start = 0xe3,
        .fw_dump_end = 0xea,
@@ -361,6 +369,59 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
                                 0x59, 0x5c, 0x5d},
 };
 
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8997 = {
+       .start_rd_port = 0,
+       .start_wr_port = 0,
+       .base_0_reg = 0xF8,
+       .base_1_reg = 0xF9,
+       .poll_reg = 0x5C,
+       .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+                       CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+       .host_int_rsr_reg = 0x4,
+       .host_int_status_reg = 0x0C,
+       .host_int_mask_reg = 0x08,
+       .status_reg_0 = 0xE8,
+       .status_reg_1 = 0xE9,
+       .sdio_int_mask = 0xff,
+       .data_port_mask = 0xffffffff,
+       .io_port_0_reg = 0xE4,
+       .io_port_1_reg = 0xE5,
+       .io_port_2_reg = 0xE6,
+       .max_mp_regs = 196,
+       .rd_bitmap_l = 0x10,
+       .rd_bitmap_u = 0x11,
+       .rd_bitmap_1l = 0x12,
+       .rd_bitmap_1u = 0x13,
+       .wr_bitmap_l = 0x14,
+       .wr_bitmap_u = 0x15,
+       .wr_bitmap_1l = 0x16,
+       .wr_bitmap_1u = 0x17,
+       .rd_len_p0_l = 0x18,
+       .rd_len_p0_u = 0x19,
+       .card_misc_cfg_reg = 0xd8,
+       .card_cfg_2_1_reg = 0xd9,
+       .cmd_rd_len_0 = 0xc0,
+       .cmd_rd_len_1 = 0xc1,
+       .cmd_rd_len_2 = 0xc2,
+       .cmd_rd_len_3 = 0xc3,
+       .cmd_cfg_0 = 0xc4,
+       .cmd_cfg_1 = 0xc5,
+       .cmd_cfg_2 = 0xc6,
+       .cmd_cfg_3 = 0xc7,
+       .fw_dump_host_ready = 0xcc,
+       .fw_dump_ctrl = 0xf0,
+       .fw_dump_start = 0xf1,
+       .fw_dump_end = 0xf8,
+       .func1_dump_reg_start = 0x10,
+       .func1_dump_reg_end = 0x17,
+       .func1_scratch_reg = 0xe8,
+       .func1_spec_reg_num = 13,
+       .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D,
+                                0x60, 0x61, 0x62, 0x64,
+                                0x65, 0x66, 0x68, 0x69,
+                                0x6a},
+};
+
 static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = {
        .start_rd_port = 0,
        .start_wr_port = 0,
@@ -469,6 +530,22 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
        .can_ext_scan = true,
 };
 
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
+       .firmware = SD8997_DEFAULT_FW_NAME,
+       .reg = &mwifiex_reg_sd8997,
+       .max_ports = 32,
+       .mp_agg_pkt_limit = 16,
+       .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+       .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+       .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+       .supports_sdio_new_mode = true,
+       .has_control_mask = false,
+       .can_dump_fw = true,
+       .fw_dump_enh = true,
+       .can_auto_tdls = false,
+       .can_ext_scan = true,
+};
+
 static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
        .firmware = SD8887_DEFAULT_FW_NAME,
        .reg = &mwifiex_reg_sd8887,
index 037adcd1f484abeb5f54478893ab7b1ff2b7ab76..a49a80dd773edbf02fe99d433d9eddee48349c76 100644 (file)
 #include "11n.h"
 #include "11ac.h"
 
+static bool drcs;
+module_param(drcs, bool, 0644);
+MODULE_PARM_DESC(drcs, "multi-channel operation:1, single-channel operation:0");
+
 static bool disable_auto_ds;
 module_param(disable_auto_ds, bool, 0);
 MODULE_PARM_DESC(disable_auto_ds,
@@ -1511,6 +1515,22 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
        return 0;
 }
 
+static int
+mwifiex_cmd_set_mc_policy(struct mwifiex_private *priv,
+                         struct host_cmd_ds_command *cmd,
+                         u16 cmd_action, void *data_buf)
+{
+       struct host_cmd_ds_multi_chan_policy *mc_pol = &cmd->params.mc_policy;
+       const u16 *drcs_info = data_buf;
+
+       mc_pol->action = cpu_to_le16(cmd_action);
+       mc_pol->policy = cpu_to_le16(*drcs_info);
+       cmd->command = cpu_to_le16(HostCmd_CMD_MC_POLICY);
+       cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_multi_chan_policy) +
+                               S_DS_GEN);
+       return 0;
+}
+
 static int
 mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
                         struct host_cmd_ds_command *cmd,
@@ -1575,6 +1595,50 @@ mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
        return 0;
 }
 
+static int
+mwifiex_cmd_tdls_config(struct mwifiex_private *priv,
+                       struct host_cmd_ds_command *cmd,
+                       u16 cmd_action, void *data_buf)
+{
+       struct host_cmd_ds_tdls_config *tdls_config = &cmd->params.tdls_config;
+       struct mwifiex_tdls_init_cs_params *config;
+       struct mwifiex_tdls_config *init_config;
+       u16 len;
+
+       cmd->command = cpu_to_le16(HostCmd_CMD_TDLS_CONFIG);
+       cmd->size = cpu_to_le16(S_DS_GEN);
+       tdls_config->tdls_action = cpu_to_le16(cmd_action);
+       le16_add_cpu(&cmd->size, sizeof(tdls_config->tdls_action));
+
+       switch (cmd_action) {
+       case ACT_TDLS_CS_ENABLE_CONFIG:
+               init_config = data_buf;
+               len = sizeof(*init_config);
+               memcpy(tdls_config->tdls_data, init_config, len);
+               break;
+       case ACT_TDLS_CS_INIT:
+               config = data_buf;
+               len = sizeof(*config);
+               memcpy(tdls_config->tdls_data, config, len);
+               break;
+       case ACT_TDLS_CS_STOP:
+               len = sizeof(struct mwifiex_tdls_stop_cs_params);
+               memcpy(tdls_config->tdls_data, data_buf, len);
+               break;
+       case ACT_TDLS_CS_PARAMS:
+               len = sizeof(struct mwifiex_tdls_config_cs_params);
+               memcpy(tdls_config->tdls_data, data_buf, len);
+               break;
+       default:
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Unknown TDLS configuration\n");
+               return -ENOTSUPP;
+       }
+
+       le16_add_cpu(&cmd->size, len);
+       return 0;
+}
+
 static int
 mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
                      struct host_cmd_ds_command *cmd,
@@ -1933,10 +1997,12 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
                if (priv->bss_mode == NL80211_IFTYPE_ADHOC)
                        cmd_ptr->params.bss_mode.con_type =
                                CONNECTION_TYPE_ADHOC;
-               else if (priv->bss_mode == NL80211_IFTYPE_STATION)
+               else if (priv->bss_mode == NL80211_IFTYPE_STATION ||
+                        priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT)
                        cmd_ptr->params.bss_mode.con_type =
                                CONNECTION_TYPE_INFRA;
-               else if (priv->bss_mode == NL80211_IFTYPE_AP)
+               else if (priv->bss_mode == NL80211_IFTYPE_AP ||
+                        priv->bss_mode == NL80211_IFTYPE_P2P_GO)
                        cmd_ptr->params.bss_mode.con_type = CONNECTION_TYPE_AP;
                cmd_ptr->size = cpu_to_le16(sizeof(struct
                                host_cmd_ds_set_bss_mode) + S_DS_GEN);
@@ -1958,6 +2024,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
        case HostCmd_CMD_TDLS_OPER:
                ret = mwifiex_cmd_tdls_oper(priv, cmd_ptr, data_buf);
                break;
+       case HostCmd_CMD_TDLS_CONFIG:
+               ret = mwifiex_cmd_tdls_config(priv, cmd_ptr, cmd_action,
+                                             data_buf);
+               break;
        case HostCmd_CMD_CHAN_REPORT_REQUEST:
                ret = mwifiex_cmd_issue_chan_report_request(priv, cmd_ptr,
                                                            data_buf);
@@ -1966,6 +2036,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
                ret = mwifiex_cmd_sdio_rx_aggr_cfg(cmd_ptr, cmd_action,
                                                   data_buf);
                break;
+       case HostCmd_CMD_MC_POLICY:
+               ret = mwifiex_cmd_set_mc_policy(priv, cmd_ptr, cmd_action,
+                                               data_buf);
+               break;
        default:
                mwifiex_dbg(priv->adapter, ERROR,
                            "PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -2082,6 +2156,18 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
                        if (ret)
                                return -1;
                }
+
+               if (drcs) {
+                       adapter->drcs_enabled = true;
+                       if (ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
+                               ret = mwifiex_send_cmd(priv,
+                                                      HostCmd_CMD_MC_POLICY,
+                                                      HostCmd_ACT_GEN_SET, 0,
+                                                      &adapter->drcs_enabled,
+                                                      true);
+                       if (ret)
+                               return -1;
+               }
        }
 
        /* get tx rate */
index b645884b3b97a2c1491f745e436d217dec3b5f4d..87b69d8ad120e0e3f51ffb7edfbf39ff6f1d6dbf 100644 (file)
@@ -599,6 +599,7 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv,
                                    "info: key: GTK is set\n");
                        priv->wpa_is_gtk_set = true;
                        priv->scan_block = false;
+                       priv->port_open = true;
                }
        }
 
@@ -629,6 +630,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
                        mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n");
                        priv->wpa_is_gtk_set = true;
                        priv->scan_block = false;
+                       priv->port_open = true;
                }
        }
 
@@ -893,7 +895,7 @@ static int mwifiex_ret_tdls_oper(struct mwifiex_private *priv,
        case ACT_TDLS_DELETE:
                if (reason) {
                        if (!node || reason == TDLS_ERR_LINK_NONEXISTENT)
-                               mwifiex_dbg(priv->adapter, ERROR,
+                               mwifiex_dbg(priv->adapter, MSG,
                                            "TDLS link delete for %pM failed: reason %d\n",
                                            cmd_tdls_oper->peer_mac, reason);
                        else
@@ -1191,12 +1193,15 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
                break;
        case HostCmd_CMD_TDLS_OPER:
                ret = mwifiex_ret_tdls_oper(priv, resp);
+       case HostCmd_CMD_MC_POLICY:
                break;
        case HostCmd_CMD_CHAN_REPORT_REQUEST:
                break;
        case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
                ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp);
                break;
+       case HostCmd_CMD_TDLS_CONFIG:
+               break;
        default:
                mwifiex_dbg(adapter, ERROR,
                            "CMD_RESP: unknown cmd response %#x\n",
index 848de2621958cfaacbcd500ae7317fddf6d92085..3d18c585e5436769061a3de767e17c22674206c7 100644 (file)
@@ -54,6 +54,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
        priv->media_connected = false;
 
        priv->scan_block = false;
+       priv->port_open = false;
 
        if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
            ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info)) {
@@ -153,6 +154,7 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
        struct mwifiex_sta_node *sta_ptr;
        struct mwifiex_tdls_generic_event *tdls_evt =
                        (void *)event_skb->data + sizeof(adapter->event_cause);
+       u8 *mac = tdls_evt->peer_mac;
 
        /* the 2 reserved bytes are not mandatory in the tdls event */
        if (event_skb->len < (sizeof(struct mwifiex_tdls_generic_event) -
@@ -175,6 +177,59 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
                                           le16_to_cpu(tdls_evt->u.reason_code),
                                           GFP_KERNEL);
                break;
+       case TDLS_EVENT_CHAN_SWITCH_RESULT:
+               mwifiex_dbg(adapter, EVENT, "tdls channel switch result :\n");
+               mwifiex_dbg(adapter, EVENT,
+                           "status=0x%x, reason=0x%x cur_chan=%d\n",
+                           tdls_evt->u.switch_result.status,
+                           tdls_evt->u.switch_result.reason,
+                           tdls_evt->u.switch_result.cur_chan);
+
+               /* tdls channel switch failed */
+               if (tdls_evt->u.switch_result.status != 0) {
+                       switch (tdls_evt->u.switch_result.cur_chan) {
+                       case TDLS_BASE_CHANNEL:
+                               sta_ptr->tdls_status = TDLS_IN_BASE_CHAN;
+                               break;
+                       case TDLS_OFF_CHANNEL:
+                               sta_ptr->tdls_status = TDLS_IN_OFF_CHAN;
+                               break;
+                       default:
+                               break;
+                       }
+                       return ret;
+               }
+
+               /* tdls channel switch success */
+               switch (tdls_evt->u.switch_result.cur_chan) {
+               case TDLS_BASE_CHANNEL:
+                       if (sta_ptr->tdls_status == TDLS_IN_BASE_CHAN)
+                               break;
+                       mwifiex_update_ralist_tx_pause_in_tdls_cs(priv, mac,
+                                                                 false);
+                       sta_ptr->tdls_status = TDLS_IN_BASE_CHAN;
+                       break;
+               case TDLS_OFF_CHANNEL:
+                       if (sta_ptr->tdls_status == TDLS_IN_OFF_CHAN)
+                               break;
+                       mwifiex_update_ralist_tx_pause_in_tdls_cs(priv, mac,
+                                                                 true);
+                       sta_ptr->tdls_status = TDLS_IN_OFF_CHAN;
+                       break;
+               default:
+                       break;
+               }
+
+               break;
+       case TDLS_EVENT_START_CHAN_SWITCH:
+               mwifiex_dbg(adapter, EVENT, "tdls start channel switch...\n");
+               sta_ptr->tdls_status = TDLS_CHAN_SWITCHING;
+               break;
+       case TDLS_EVENT_CHAN_SWITCH_STOPPED:
+               mwifiex_dbg(adapter, EVENT,
+                           "tdls chan switch stopped, reason=%d\n",
+                           tdls_evt->u.cs_stop_reason);
+               break;
        default:
                break;
        }
@@ -182,6 +237,145 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
        return ret;
 }
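The switch-result handling above only repauses or unpauses the ra lists when the recorded channel state actually changes; duplicated events are dropped early. A compact model of that rule (names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    enum chan_state { BASE_CHAN, OFF_CHAN };

    /* Returns true only on a real transition, i.e. when tx queues
     * must be paused (to OFF_CHAN) or resumed (to BASE_CHAN). */
    static bool cs_apply(enum chan_state *cur, enum chan_state next)
    {
            if (*cur == next)
                    return false;   /* duplicate event, nothing to do */
            *cur = next;
            return true;
    }

    int main(void)
    {
            enum chan_state st = BASE_CHAN;

            printf("%d\n", cs_apply(&st, OFF_CHAN)); /* 1: pause tx */
            printf("%d\n", cs_apply(&st, OFF_CHAN)); /* 0: ignored */
            return 0;
    }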
 
+static void mwifiex_process_uap_tx_pause(struct mwifiex_private *priv,
+                                        struct mwifiex_ie_types_header *tlv)
+{
+       struct mwifiex_tx_pause_tlv *tp;
+       struct mwifiex_sta_node *sta_ptr;
+       unsigned long flags;
+
+       tp = (void *)tlv;
+       mwifiex_dbg(priv->adapter, EVENT,
+                   "uap tx_pause: %pM pause=%d, pkts=%d\n",
+                   tp->peermac, tp->tx_pause,
+                   tp->pkt_cnt);
+
+       if (ether_addr_equal(tp->peermac, priv->netdev->dev_addr)) {
+               if (tp->tx_pause)
+                       priv->port_open = false;
+               else
+                       priv->port_open = true;
+       } else if (is_multicast_ether_addr(tp->peermac)) {
+               mwifiex_update_ralist_tx_pause(priv, tp->peermac, tp->tx_pause);
+       } else {
+               spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+               sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac);
+               spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+               if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) {
+                       sta_ptr->tx_pause = tp->tx_pause;
+                       mwifiex_update_ralist_tx_pause(priv, tp->peermac,
+                                                      tp->tx_pause);
+               }
+       }
+}
+
+static void mwifiex_process_sta_tx_pause(struct mwifiex_private *priv,
+                                        struct mwifiex_ie_types_header *tlv)
+{
+       struct mwifiex_tx_pause_tlv *tp;
+       struct mwifiex_sta_node *sta_ptr;
+       int status;
+       unsigned long flags;
+
+       tp = (void *)tlv;
+       mwifiex_dbg(priv->adapter, EVENT,
+                   "sta tx_pause: %pM pause=%d, pkts=%d\n",
+                   tp->peermac, tp->tx_pause,
+                   tp->pkt_cnt);
+
+       if (ether_addr_equal(tp->peermac, priv->cfg_bssid)) {
+               if (tp->tx_pause)
+                       priv->port_open = false;
+               else
+                       priv->port_open = true;
+       } else {
+               if (!ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+                       return;
+
+               status = mwifiex_get_tdls_link_status(priv, tp->peermac);
+               if (mwifiex_is_tdls_link_setup(status)) {
+                       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+                       sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac);
+                       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+                       if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) {
+                               sta_ptr->tx_pause = tp->tx_pause;
+                               mwifiex_update_ralist_tx_pause(priv,
+                                                              tp->peermac,
+                                                              tp->tx_pause);
+                       }
+               }
+       }
+}
+
+void mwifiex_process_multi_chan_event(struct mwifiex_private *priv,
+                                     struct sk_buff *event_skb)
+{
+       struct mwifiex_ie_types_multi_chan_info *chan_info;
+       u16 status;
+
+       chan_info = (void *)event_skb->data + sizeof(u32);
+
+       if (le16_to_cpu(chan_info->header.type) != TLV_TYPE_MULTI_CHAN_INFO) {
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "unknown TLV in chan_info event\n");
+               return;
+       }
+
+       status = le16_to_cpu(chan_info->status);
+
+       if (status) {
+               mwifiex_dbg(priv->adapter, EVENT,
+                           "multi-channel operation started\n");
+       } else {
+               mwifiex_dbg(priv->adapter, EVENT,
+                           "multi-channel operation over\n");
+       }
+}
+
+void mwifiex_process_tx_pause_event(struct mwifiex_private *priv,
+                                   struct sk_buff *event_skb)
+{
+       struct mwifiex_ie_types_header *tlv;
+       u16 tlv_type, tlv_len;
+       int tlv_buf_left;
+
+       if (!priv->media_connected) {
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "tx_pause event while disconnected; bss_role=%d\n",
+                           priv->bss_role);
+               return;
+       }
+
+       tlv_buf_left = event_skb->len - sizeof(u32);
+       tlv = (void *)event_skb->data + sizeof(u32);
+
+       while (tlv_buf_left >= (int)sizeof(struct mwifiex_ie_types_header)) {
+               tlv_type = le16_to_cpu(tlv->type);
+               tlv_len  = le16_to_cpu(tlv->len);
+               if ((sizeof(struct mwifiex_ie_types_header) + tlv_len) >
+                   tlv_buf_left) {
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "wrong tlv: tlvLen=%d, tlvBufLeft=%d\n",
+                                   tlv_len, tlv_buf_left);
+                       break;
+               }
+               if (tlv_type == TLV_TYPE_TX_PAUSE) {
+                       if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
+                               mwifiex_process_sta_tx_pause(priv, tlv);
+                       else
+                               mwifiex_process_uap_tx_pause(priv, tlv);
+               }
+
+               tlv_buf_left -= sizeof(struct mwifiex_ie_types_header) +
+                               tlv_len;
+               tlv = (void *)((u8 *)tlv + tlv_len +
+                              sizeof(struct mwifiex_ie_types_header));
+       }
+
+}
+
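The tx-pause parser above is a standard bounds-checked TLV walk: read a type/length header, refuse any TLV whose declared length overruns the bytes remaining, then advance past header plus value. A self-contained model of the same loop (a 2-byte little-endian type and length are assumed):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t get_le16(const uint8_t *p)
    {
            return (uint16_t)(p[0] | (p[1] << 8));
    }

    static void walk_tlvs(const uint8_t *buf, size_t left)
    {
            while (left >= 4) {
                    uint16_t type = get_le16(buf);
                    uint16_t len = get_le16(buf + 2);

                    if ((size_t)len + 4 > left) {
                            fprintf(stderr, "wrong tlv: len=%u left=%zu\n",
                                    (unsigned)len, left);
                            break;  /* truncated or corrupt, stop parsing */
                    }
                    printf("tlv type %#x, %u bytes\n", type, (unsigned)len);
                    buf += 4 + len;
                    left -= 4 + len;
            }
    }

    int main(void)
    {
            const uint8_t event[] = { 0x4e, 0x01, 0x02, 0x00, 0xaa, 0xbb };

            walk_tlvs(event, sizeof(event));
            return 0;
    }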
 /*
 * This function handles coex events generated by firmware
 */
@@ -359,7 +553,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
 
        case EVENT_PS_AWAKE:
                mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
-               if (!adapter->pps_uapsd_mode &&
+               if (!adapter->pps_uapsd_mode && priv->port_open &&
                    priv->media_connected && adapter->sleep_period.period) {
                                adapter->pps_uapsd_mode = true;
                                mwifiex_dbg(adapter, EVENT,
@@ -438,6 +632,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
 
        case EVENT_PORT_RELEASE:
                mwifiex_dbg(adapter, EVENT, "event: PORT RELEASE\n");
+               priv->port_open = true;
                break;
 
        case EVENT_EXT_SCAN_REPORT:
@@ -573,6 +768,16 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                ret = mwifiex_parse_tdls_event(priv, adapter->event_skb);
                break;
 
+       case EVENT_TX_DATA_PAUSE:
+               mwifiex_dbg(adapter, EVENT, "event: TX DATA PAUSE\n");
+               mwifiex_process_tx_pause_event(priv, adapter->event_skb);
+               break;
+
+       case EVENT_MULTI_CHAN_INFO:
+               mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n");
+               mwifiex_process_multi_chan_event(priv, adapter->event_skb);
+               break;
+
        case EVENT_TX_STATUS_REPORT:
                mwifiex_dbg(adapter, EVENT, "event: TX_STATUS Report\n");
                mwifiex_parse_tx_status_event(priv, adapter->event_body);
index d8b7d9c20450f704988c22e26ce81656d4e1a621..a6c8a4f7bfe96aa44b5f46db60c29944abf5a79b 100644 (file)
@@ -66,8 +66,8 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
        if (status <= 0) {
                if (status == 0)
                        status = -ETIMEDOUT;
-               mwifiex_dbg(adapter, ERROR,
-                           "cmd_wait_q terminated: %d\n", status);
+               mwifiex_dbg(adapter, ERROR, "cmd_wait_q terminated: %d\n",
+                           status);
                mwifiex_cancel_all_pending_cmd(adapter);
                return status;
        }
index 2faa1bc42abee2eb838d65c6c75c33a243f53875..b3e163de98995ba62767ab904f636853e2847ac3 100644 (file)
@@ -49,7 +49,7 @@ static void mwifiex_restore_tdls_packets(struct mwifiex_private *priv,
                tid = skb->priority;
                tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
 
-               if (status == TDLS_SETUP_COMPLETE) {
+               if (mwifiex_is_tdls_link_setup(status)) {
                        ra_list = mwifiex_wmm_get_queue_raptr(priv, tid, mac);
                        ra_list->tdls_link = true;
                        tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
@@ -164,7 +164,7 @@ static void mwifiex_tdls_add_aid(struct mwifiex_private *priv,
        pos = (void *)skb_put(skb, 4);
        *pos++ = WLAN_EID_AID;
        *pos++ = 2;
-       *pos++ = le16_to_cpu(assoc_rsp->a_id);
+       memcpy(pos, &assoc_rsp->a_id, sizeof(assoc_rsp->a_id));
 
        return;
 }
@@ -355,6 +355,7 @@ static void mwifiex_tdls_add_ext_capab(struct mwifiex_private *priv,
        extcap->ieee_hdr.len = 8;
        memset(extcap->ext_capab, 0, 8);
        extcap->ext_capab[4] |= WLAN_EXT_CAPA5_TDLS_ENABLED;
+       extcap->ext_capab[3] |= WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH;
 
        if (priv->adapter->is_hw_11ac_capable)
                extcap->ext_capab[7] |= WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED;
@@ -1071,6 +1072,11 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
                        for (i = 0; i < MAX_NUM_TID; i++)
                                sta_ptr->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
                }
+               if (sta_ptr->tdls_cap.extcap.ext_capab[3] &
+                   WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH) {
+                       mwifiex_config_tdls_enable(priv);
+                       mwifiex_config_tdls_cs_params(priv);
+               }
 
                memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq));
                mwifiex_restore_tdls_packets(priv, peer, TDLS_SETUP_COMPLETE);
@@ -1141,7 +1147,7 @@ int mwifiex_get_tdls_list(struct mwifiex_private *priv,
 
        spin_lock_irqsave(&priv->sta_list_spinlock, flags);
        list_for_each_entry(sta_ptr, &priv->sta_list, list) {
-               if (sta_ptr->tdls_status == TDLS_SETUP_COMPLETE) {
+               if (mwifiex_is_tdls_link_setup(sta_ptr->tdls_status)) {
                        ether_addr_copy(peer->peer_addr, sta_ptr->mac_addr);
                        peer++;
                        count++;
@@ -1295,7 +1301,7 @@ void mwifiex_auto_tdls_update_peer_status(struct mwifiex_private *priv,
                        if ((link_status == TDLS_NOT_SETUP) &&
                            (peer->tdls_status == TDLS_SETUP_INPROGRESS))
                                peer->failure_count++;
-                       else if (link_status == TDLS_SETUP_COMPLETE)
+                       else if (mwifiex_is_tdls_link_setup(link_status))
                                peer->failure_count = 0;
 
                        peer->tdls_status = link_status;
@@ -1367,7 +1373,7 @@ void mwifiex_check_auto_tdls(unsigned long context)
 
                if (((tdls_peer->rssi >= MWIFIEX_TDLS_RSSI_LOW) ||
                     !tdls_peer->rssi) &&
-                   tdls_peer->tdls_status == TDLS_SETUP_COMPLETE) {
+                   mwifiex_is_tdls_link_setup(tdls_peer->tdls_status)) {
                        tdls_peer->tdls_status = TDLS_LINK_TEARDOWN;
                        mwifiex_dbg(priv->adapter, MSG,
                                    "teardown TDLS link,peer=%pM rssi=%d\n",
@@ -1416,3 +1422,67 @@ void mwifiex_clean_auto_tdls(struct mwifiex_private *priv)
                mwifiex_flush_auto_tdls_list(priv);
        }
 }
+
+static int mwifiex_config_tdls(struct mwifiex_private *priv, u8 enable)
+{
+       struct mwifiex_tdls_config config;
+
+       config.enable = cpu_to_le16(enable);
+       return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+                               ACT_TDLS_CS_ENABLE_CONFIG, 0, &config, true);
+}
+
+int mwifiex_config_tdls_enable(struct mwifiex_private *priv)
+{
+       return mwifiex_config_tdls(priv, true);
+}
+
+int mwifiex_config_tdls_disable(struct mwifiex_private *priv)
+{
+       return mwifiex_config_tdls(priv, false);
+}
+
+int mwifiex_config_tdls_cs_params(struct mwifiex_private *priv)
+{
+       struct mwifiex_tdls_config_cs_params config_tdls_cs_params;
+
+       config_tdls_cs_params.unit_time = MWIFIEX_DEF_CS_UNIT_TIME;
+       config_tdls_cs_params.thr_otherlink = MWIFIEX_DEF_CS_THR_OTHERLINK;
+       config_tdls_cs_params.thr_directlink = MWIFIEX_DEF_THR_DIRECTLINK;
+
+       return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+                               ACT_TDLS_CS_PARAMS, 0,
+                               &config_tdls_cs_params, true);
+}
+
+int mwifiex_stop_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac)
+{
+       struct mwifiex_tdls_stop_cs_params stop_tdls_cs_params;
+
+       ether_addr_copy(stop_tdls_cs_params.peer_mac, peer_mac);
+
+       return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+                               ACT_TDLS_CS_STOP, 0,
+                               &stop_tdls_cs_params, true);
+}
+
+int mwifiex_start_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac,
+                         u8 primary_chan, u8 second_chan_offset, u8 band)
+{
+       struct mwifiex_tdls_init_cs_params start_tdls_cs_params;
+
+       ether_addr_copy(start_tdls_cs_params.peer_mac, peer_mac);
+       start_tdls_cs_params.primary_chan = primary_chan;
+       start_tdls_cs_params.second_chan_offset = second_chan_offset;
+       start_tdls_cs_params.band = band;
+
+       start_tdls_cs_params.switch_time = cpu_to_le16(MWIFIEX_DEF_CS_TIME);
+       start_tdls_cs_params.switch_timeout =
+                                       cpu_to_le16(MWIFIEX_DEF_CS_TIMEOUT);
+       start_tdls_cs_params.reg_class = MWIFIEX_DEF_CS_REG_CLASS;
+       start_tdls_cs_params.periodicity = MWIFIEX_DEF_CS_PERIODICITY;
+
+       return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+                               ACT_TDLS_CS_INIT, 0,
+                               &start_tdls_cs_params, true);
+}
index 5ed9b794053e760163dda372f205fd28463a9efe..8b1e5b5d47feee82a634a8de5856a94a135884fe 100644 (file)
@@ -370,8 +370,28 @@ void mwifiex_parse_tx_status_event(struct mwifiex_private *priv,
                        /* consumes ack_skb */
                        skb_complete_wifi_ack(ack_skb, !tx_status->status);
                } else {
+                       /* Remove the broadcast address which was added by the driver */
+                       memmove(ack_skb->data +
+                               sizeof(struct ieee80211_hdr_3addr) +
+                               MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16),
+                               ack_skb->data +
+                               sizeof(struct ieee80211_hdr_3addr) +
+                               MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16) +
+                               ETH_ALEN, ack_skb->len -
+                               (sizeof(struct ieee80211_hdr_3addr) +
+                               MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16) +
+                               ETH_ALEN));
+                       ack_skb->len = ack_skb->len - ETH_ALEN;
+                       /* Remove the driver's proprietary header, including
+                        * the 2-byte packet length, and pass the actual
+                        * management frame buffer to cfg80211.
+                        */
                        cfg80211_mgmt_tx_status(&priv->wdev, tx_info->cookie,
-                                               ack_skb->data, ack_skb->len,
+                                               ack_skb->data +
+                                               MWIFIEX_MGMT_FRAME_HEADER_SIZE +
+                                               sizeof(u16), ack_skb->len -
+                                               (MWIFIEX_MGMT_FRAME_HEADER_SIZE
+                                                + sizeof(u16)),
                                                !tx_status->status, GFP_ATOMIC);
                        dev_kfree_skb_any(ack_skb);
                }
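The memmove above cuts a fixed-size field (the appended broadcast address) out of the middle of the ack buffer and shrinks the length to match. The same surgery in miniature, with made-up offsets standing in for the 802.11 and proprietary headers:

    #include <stdio.h>
    #include <string.h>

    #define HDR 4   /* stand-in for the combined header size */
    #define CUT 6   /* stand-in for ETH_ALEN */

    int main(void)
    {
            char buf[] = "HDR!AABBCCpayload";
            size_t len = sizeof(buf) - 1;

            /* Close the gap over the cut field, then drop its length. */
            memmove(buf + HDR, buf + HDR + CUT, len - HDR - CUT);
            len -= CUT;
            buf[len] = '\0';
            printf("%s\n", buf);    /* prints "HDR!payload" */
            return 0;
    }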
index b74930054b8c0b1f1cc2066ef2d22f2ca542ebb4..4d5a6e3b6361700c9bcab06201a70cd29d38b66e 100644 (file)
@@ -808,7 +808,7 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv,
                             struct mwifiex_uap_bss_param *bss_cfg,
                             struct cfg80211_chan_def chandef)
 {
-       u8 config_bands = 0;
+       u8 config_bands = 0, old_bands = priv->adapter->config_bands;
 
        priv->bss_chandef = chandef;
 
@@ -834,6 +834,11 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv,
        }
 
        priv->adapter->config_bands = config_bands;
+
+       if (old_bands != config_bands) {
+               mwifiex_send_domain_info_cmd_fw(priv->adapter->wiphy);
+               mwifiex_dnld_txpwr_table(priv);
+       }
 }
 
 int mwifiex_config_start_uap(struct mwifiex_private *priv,
index 7bc1f850e3b7195302577738ef3c546f4fddd92a..46c972a650a43c2918bb7703c98eeb877ccb79e6 100644 (file)
@@ -41,6 +41,8 @@ static int mwifiex_check_uap_capabilties(struct mwifiex_private *priv,
        mwifiex_dbg_dump(priv->adapter, EVT_D, "uap capabilities:",
                         event->data, event->len);
 
+       skb_push(event, MWIFIEX_BSS_START_EVT_FIX_SIZE);
+
        while ((evt_len >= sizeof(tlv_hdr->header))) {
                tlv_hdr = (struct mwifiex_ie_types_data *)curr;
                tlv_len = le16_to_cpu(tlv_hdr->header.len);
@@ -176,6 +178,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
                break;
        case EVENT_UAP_BSS_IDLE:
                priv->media_connected = false;
+               priv->port_open = false;
                if (netif_carrier_ok(priv->netdev))
                        netif_carrier_off(priv->netdev);
                mwifiex_stop_net_dev_queue(priv->netdev, adapter);
@@ -185,6 +188,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
                break;
        case EVENT_UAP_BSS_ACTIVE:
                priv->media_connected = true;
+               priv->port_open = true;
                if (!netif_carrier_ok(priv->netdev))
                        netif_carrier_on(priv->netdev);
                mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
@@ -192,6 +196,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
        case EVENT_UAP_BSS_START:
                mwifiex_dbg(adapter, EVENT,
                            "AP EVENT: event id: %#x\n", eventcause);
+               priv->port_open = false;
                memcpy(priv->netdev->dev_addr, adapter->event_body + 2,
                       ETH_ALEN);
                if (priv->hist_data)
@@ -297,6 +302,16 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
                mwifiex_bt_coex_wlan_param_update_event(priv,
                                                        adapter->event_skb);
                break;
+       case EVENT_TX_DATA_PAUSE:
+               mwifiex_dbg(adapter, EVENT, "event: TX DATA PAUSE\n");
+               mwifiex_process_tx_pause_event(priv, adapter->event_skb);
+               break;
+
+       case EVENT_MULTI_CHAN_INFO:
+               mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n");
+               mwifiex_process_multi_chan_event(priv, adapter->event_skb);
+               break;
+
        default:
                mwifiex_dbg(adapter, EVENT,
                            "event: unknown event id: %#x\n", eventcause);
index aada93425f806a74b481e937d7f25fab5a68d4d3..5e789b2e06ea658d78f9997041a7c9853de171ae 100644 (file)
@@ -47,6 +47,11 @@ static struct usb_device_id mwifiex_usb_table[] = {
        {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8897_PID_2,
                                       USB_CLASS_VENDOR_SPEC,
                                       USB_SUBCLASS_VENDOR_SPEC, 0xff)},
+       /* 8997 */
+       {USB_DEVICE(USB8XXX_VID, USB8997_PID_1)},
+       {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8997_PID_2,
+                                      USB_CLASS_VENDOR_SPEC,
+                                      USB_SUBCLASS_VENDOR_SPEC, 0xff)},
        { }     /* Terminating entry */
 };
 
@@ -244,9 +249,11 @@ setup_for_next:
        if (card->rx_cmd_ep == context->ep) {
                mwifiex_usb_submit_rx_urb(context, size);
        } else {
-               context->skb = NULL;
-               if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING)
+               if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING) {
                        mwifiex_usb_submit_rx_urb(context, size);
+               } else {
+                       context->skb = NULL;
+               }
        }
 
        return;
@@ -380,12 +387,14 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
        case USB8797_PID_1:
        case USB8801_PID_1:
        case USB8897_PID_1:
+       case USB8997_PID_1:
                card->usb_boot_state = USB8XXX_FW_DNLD;
                break;
        case USB8766_PID_2:
        case USB8797_PID_2:
        case USB8801_PID_2:
        case USB8897_PID_2:
+       case USB8997_PID_2:
                card->usb_boot_state = USB8XXX_FW_READY;
                break;
        default:
@@ -812,6 +821,12 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
        adapter->dev = &card->udev->dev;
 
        switch (le16_to_cpu(card->udev->descriptor.idProduct)) {
+       case USB8997_PID_1:
+       case USB8997_PID_2:
+               adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
+               strcpy(adapter->fw_name, USB8997_DEFAULT_FW_NAME);
+               adapter->ext_scan = true;
+               break;
        case USB8897_PID_1:
        case USB8897_PID_2:
                adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
@@ -868,8 +883,10 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
 
        /* Allocate memory for transmit */
        fwdata = kzalloc(FW_DNLD_TX_BUF_SIZE, GFP_KERNEL);
-       if (!fwdata)
+       if (!fwdata) {
+               ret = -ENOMEM;
                goto fw_exit;
+       }
 
        /* Allocate memory for receive */
        recv_buff = kzalloc(FW_DNLD_RX_BUF_SIZE, GFP_KERNEL);
@@ -1119,3 +1136,4 @@ MODULE_FIRMWARE(USB8766_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(USB8797_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(USB8801_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(USB8897_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(USB8997_DEFAULT_FW_NAME);
index 57e1a5736318d6dec825f9348ed1fb7c5cd435db..f0051f8c8981eb4d7bd1614999a2db9238a66fd2 100644 (file)
@@ -32,6 +32,8 @@
 #define USB8897_PID_2          0x2046
 #define USB8801_PID_1          0x2049
 #define USB8801_PID_2          0x204a
+#define USB8997_PID_1          0x204d
+#define USB8997_PID_2          0x204e
 
 
 #define USB8XXX_FW_DNLD                1
@@ -46,6 +48,7 @@
 #define USB8797_DEFAULT_FW_NAME        "mrvl/usb8797_uapsta.bin"
 #define USB8801_DEFAULT_FW_NAME        "mrvl/usb8801_uapsta.bin"
 #define USB8897_DEFAULT_FW_NAME        "mrvl/usb8897_uapsta.bin"
+#define USB8997_DEFAULT_FW_NAME        "mrvl/usb8997_uapsta.bin"
 
 #define FW_DNLD_TX_BUF_SIZE    620
 #define FW_DNLD_RX_BUF_SIZE    2048
index 790e61953abffc8218ea3ecc6ec5cc2d4d9c630f..0cec8a64473e9c1f764b841b27c74c9b36800fc5 100644 (file)
@@ -126,6 +126,10 @@ static int num_of_items = ARRAY_SIZE(items);
 int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter)
 {
 
+       if (adapter->hw_status == MWIFIEX_HW_STATUS_READY)
+               if (adapter->if_ops.init_fw_port)
+                       adapter->if_ops.init_fw_port(adapter);
+
        adapter->init_wait_q_woken = true;
        wake_up_interruptible(&adapter->init_wait_q);
        return 0;
@@ -496,16 +500,12 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
 int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
                         struct cmd_ctrl_node *cmd_node)
 {
-       mwifiex_dbg(adapter, CMD,
-                   "cmd completed: status=%d\n",
+       WARN_ON(!cmd_node->wait_q_enabled);
+       mwifiex_dbg(adapter, CMD, "cmd completed: status=%d\n",
                    adapter->cmd_wait_q.status);
 
-       *(cmd_node->condition) = true;
-
-       if (adapter->cmd_wait_q.status == -ETIMEDOUT)
-               mwifiex_dbg(adapter, ERROR, "cmd timeout\n");
-       else
-               wake_up_interruptible(&adapter->cmd_wait_q.wait);
+       *cmd_node->condition = true;
+       wake_up_interruptible(&adapter->cmd_wait_q.wait);
 
        return 0;
 }
@@ -531,6 +531,65 @@ mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac)
        return NULL;
 }
 
+static struct mwifiex_sta_node *
+mwifiex_get_tdls_sta_entry(struct mwifiex_private *priv, u8 status)
+{
+       struct mwifiex_sta_node *node;
+
+       list_for_each_entry(node, &priv->sta_list, list) {
+               if (node->tdls_status == status)
+                       return node;
+       }
+
+       return NULL;
+}
+
+/* If tdls channel switching is in progress, tx data traffic should be
+ * blocked until the switching stage has completed.
+ */
+u8 mwifiex_is_tdls_chan_switching(struct mwifiex_private *priv)
+{
+       struct mwifiex_sta_node *sta_ptr;
+
+       if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+               return false;
+
+       sta_ptr = mwifiex_get_tdls_sta_entry(priv, TDLS_CHAN_SWITCHING);
+       if (sta_ptr)
+               return true;
+
+       return false;
+}
+
+u8 mwifiex_is_tdls_off_chan(struct mwifiex_private *priv)
+{
+       struct mwifiex_sta_node *sta_ptr;
+
+       if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+               return false;
+
+       sta_ptr = mwifiex_get_tdls_sta_entry(priv, TDLS_IN_OFF_CHAN);
+       if (sta_ptr)
+               return true;
+
+       return false;
+}
+
+/* If tdls channel switching is in progress or the tdls link operates on
+ * the off-channel, the cmd path should be blocked until tdls switches
+ * back to the base channel.
+ */
+u8 mwifiex_is_send_cmd_allowed(struct mwifiex_private *priv)
+{
+       if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+               return true;
+
+       if (mwifiex_is_tdls_chan_switching(priv) ||
+           mwifiex_is_tdls_off_chan(priv))
+               return false;
+
+       return true;
+}
+
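Taken together, the helpers above gate the command path on TDLS channel state: while any peer is mid-switch or parked off-channel, new commands are held back. A trivial model of how a caller would consult the gate (names invented for the sketch):

    #include <stdbool.h>
    #include <stdio.h>

    struct tdls_state {
            bool chan_switching;
            bool off_chan;
    };

    static bool send_cmd_allowed(const struct tdls_state *s)
    {
            return !(s->chan_switching || s->off_chan);
    }

    int main(void)
    {
            struct tdls_state s = { .chan_switching = true };

            if (!send_cmd_allowed(&s))
                    printf("defer command until back on base channel\n");
            return 0;
    }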
 /* This function will add a sta_node entry to associated station list
  * table with the given mac address.
  * If entry exist already, existing entry is returned.
index a8ea21c3340c73537c8f597ad6dc2d176b45f588..173d3663c2e042bfe44e680e04acf86cf43f7c30 100644 (file)
@@ -160,9 +160,10 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
                ra_list->tdls_link = false;
                ra_list->ba_status = BA_SETUP_NONE;
                ra_list->amsdu_in_ampdu = false;
+               ra_list->tx_paused = false;
                if (!mwifiex_queuing_ra_based(priv)) {
-                       if (mwifiex_get_tdls_link_status(priv, ra) ==
-                           TDLS_SETUP_COMPLETE) {
+                       if (mwifiex_is_tdls_link_setup
+                               (mwifiex_get_tdls_link_status(priv, ra))) {
                                ra_list->tdls_link = true;
                                ra_list->is_11n_enabled =
                                        mwifiex_tdls_peer_11n_enabled(priv, ra);
@@ -448,6 +449,11 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
        }
 }
 
+int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter)
+{
+       return atomic_read(&adapter->bypass_tx_pending) ? false : true;
+}
+
 /*
  * This function checks if WMM Tx queue is empty.
  */
@@ -459,6 +465,8 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
 
        for (i = 0; i < adapter->priv_num; ++i) {
                priv = adapter->priv[i];
+               if (priv && !priv->port_open)
+                       continue;
                if (priv && atomic_read(&priv->wmm.tx_pkts_queued))
                        return false;
        }
@@ -580,6 +588,10 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
        skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
                mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
 
+       skb_queue_walk_safe(&priv->bypass_txq, skb, tmp)
+               mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+       atomic_set(&priv->adapter->bypass_tx_pending, 0);
+
        idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL);
        idr_destroy(&priv->ack_status_frames);
 }
@@ -603,6 +615,88 @@ mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
        return NULL;
 }
 
+void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac,
+                                   u8 tx_pause)
+{
+       struct mwifiex_ra_list_tbl *ra_list;
+       u32 pkt_cnt = 0, tx_pkts_queued;
+       unsigned long flags;
+       int i;
+
+       spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+       for (i = 0; i < MAX_NUM_TID; ++i) {
+               ra_list = mwifiex_wmm_get_ralist_node(priv, i, mac);
+               if (ra_list && ra_list->tx_paused != tx_pause) {
+                       pkt_cnt += ra_list->total_pkt_count;
+                       ra_list->tx_paused = tx_pause;
+                       if (tx_pause)
+                               priv->wmm.pkts_paused[i] +=
+                                       ra_list->total_pkt_count;
+                       else
+                               priv->wmm.pkts_paused[i] -=
+                                       ra_list->total_pkt_count;
+               }
+       }
+
+       if (pkt_cnt) {
+               tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
+               if (tx_pause)
+                       tx_pkts_queued -= pkt_cnt;
+               else
+                       tx_pkts_queued += pkt_cnt;
+
+               atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
+               atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
+       }
+       spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+}
+
+/* This function updates the tx_pause state of non-tdls peer ra lists
+ * while tdls channel switching is in progress.
+ */
+void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv,
+                                              u8 *mac, u8 tx_pause)
+{
+       struct mwifiex_ra_list_tbl *ra_list;
+       u32 pkt_cnt = 0, tx_pkts_queued;
+       unsigned long flags;
+       int i;
+
+       spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+       for (i = 0; i < MAX_NUM_TID; ++i) {
+               list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[i].ra_list,
+                                   list) {
+                       if (!memcmp(ra_list->ra, mac, ETH_ALEN))
+                               continue;
+
+                       if (ra_list && ra_list->tx_paused != tx_pause) {
+                               pkt_cnt += ra_list->total_pkt_count;
+                               ra_list->tx_paused = tx_pause;
+                               if (tx_pause)
+                                       priv->wmm.pkts_paused[i] +=
+                                               ra_list->total_pkt_count;
+                               else
+                                       priv->wmm.pkts_paused[i] -=
+                                               ra_list->total_pkt_count;
+                       }
+               }
+       }
+
+       if (pkt_cnt) {
+               tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
+               if (tx_pause)
+                       tx_pkts_queued -= pkt_cnt;
+               else
+                       tx_pkts_queued += pkt_cnt;
+
+               atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
+               atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
+       }
+       spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+}
+
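Both pause helpers above keep two counters consistent: packets on a paused ra list leave the schedulable total (tx_pkts_queued) and are parked in pkts_paused, then move back on resume, so nothing is lost or double-counted. The bookkeeping in isolation:

    #include <stdio.h>

    struct wmm_counts {
            int tx_pkts_queued;  /* schedulable packets */
            int pkts_paused;     /* parked on paused ra lists */
    };

    static void set_pause(struct wmm_counts *w, int ralist_pkts, int pause)
    {
            if (pause) {
                    w->pkts_paused += ralist_pkts;
                    w->tx_pkts_queued -= ralist_pkts;
            } else {
                    w->pkts_paused -= ralist_pkts;
                    w->tx_pkts_queued += ralist_pkts;
            }
    }

    int main(void)
    {
            struct wmm_counts w = { .tx_pkts_queued = 10 };

            set_pause(&w, 4, 1);    /* pause: 6 schedulable, 4 parked */
            set_pause(&w, 4, 0);    /* resume: back to 10 and 0 */
            printf("queued=%d paused=%d\n", w.tx_pkts_queued, w.pkts_paused);
            return 0;
    }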
 /*
  * This function retrieves an RA list node for a given TID and
  * RA address pair.
@@ -669,6 +763,18 @@ mwifiex_is_ralist_valid(struct mwifiex_private *priv,
        return false;
 }
 
+/*
+ * This function adds a packet to the bypass TX queue.
+ * This is a special TX queue for packets which can be sent even when
+ * port_open is false.
+ */
+void
+mwifiex_wmm_add_buf_bypass_txqueue(struct mwifiex_private *priv,
+                                  struct sk_buff *skb)
+{
+       skb_queue_tail(&priv->bypass_txq, skb);
+}
+
 /*
  * This function adds a packet to WMM queue.
  *
@@ -723,6 +829,9 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
            !mwifiex_is_skb_mgmt_frame(skb)) {
                switch (tdls_status) {
                case TDLS_SETUP_COMPLETE:
+               case TDLS_CHAN_SWITCHING:
+               case TDLS_IN_BASE_CHAN:
+               case TDLS_IN_OFF_CHAN:
                        ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
                                                              ra);
                        tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
@@ -765,7 +874,10 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
                atomic_set(&priv->wmm.highest_queued_prio,
                           priv->tos_to_tid_inv[tid_down]);
 
-       atomic_inc(&priv->wmm.tx_pkts_queued);
+       if (ra_list->tx_paused)
+               priv->wmm.pkts_paused[tid_down]++;
+       else
+               atomic_inc(&priv->wmm.tx_pkts_queued);
 
        spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
 }
@@ -970,7 +1082,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
 
                        priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;
 
-                       if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0)
+                       if (!priv_tmp->port_open ||
+                           (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0))
                                continue;
 
                        /* iterate over the WMM queues of the BSS */
@@ -987,7 +1100,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
                                list_for_each_entry(ptr, &tid_ptr->ra_list,
                                                    list) {
 
-                                       if (!skb_queue_empty(&ptr->skb_head))
+                                       if (!ptr->tx_paused &&
+                                           !skb_queue_empty(&ptr->skb_head))
                                                /* holds both locks */
                                                goto found;
                                }
@@ -1339,6 +1453,38 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
        return 0;
 }
 
+void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter)
+{
+       struct mwifiex_tx_param tx_param;
+       struct sk_buff *skb;
+       struct mwifiex_txinfo *tx_info;
+       struct mwifiex_private *priv;
+       int i;
+
+       if (adapter->data_sent || adapter->tx_lock_flag)
+               return;
+
+       for (i = 0; i < adapter->priv_num; ++i) {
+               priv = adapter->priv[i];
+
+               if (skb_queue_empty(&priv->bypass_txq))
+                       continue;
+
+               skb = skb_dequeue(&priv->bypass_txq);
+               tx_info = MWIFIEX_SKB_TXCB(skb);
+
+               /* no aggregation for bypass packets */
+               tx_param.next_pkt_len = 0;
+
+               if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
+                       skb_queue_head(&priv->bypass_txq, skb);
+                       tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
+               } else {
+                       atomic_dec(&adapter->bypass_tx_pending);
+               }
+       }
+}
+
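mwifiex_process_bypass_tx() above uses a classic requeue-on-busy shape: dequeue, try the hardware, and if the bus reports busy put the packet back at the head so ordering is preserved for the next attempt. In miniature (the busy condition is simulated):

    #include <stdio.h>

    #define NPKTS 3

    /* Pretend the bus rejects the second packet exactly once. */
    static int hw_send(int pkt, int *busy_once)
    {
            if (pkt == 2 && (*busy_once)--)
                    return -1;      /* stand-in for -EBUSY */
            return 0;
    }

    int main(void)
    {
            int q[NPKTS] = { 1, 2, 3 };
            int head = 0, busy_once = 1;

            while (head < NPKTS) {
                    if (hw_send(q[head], &busy_once)) {
                            printf("pkt %d busy, requeued at head\n", q[head]);
                            continue;   /* same packet is retried first */
                    }
                    printf("pkt %d sent\n", q[head]);
                    head++;
            }
            return 0;
    }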
 /*
  * This function transmits the highest priority packet awaiting in the
  * WMM Queues.
index 48ece0b355919d3c3a4278dfc1727fc391f5848d..38f09762bd2f93ba90ef341ac5d0deda575046dc 100644 (file)
@@ -99,12 +99,16 @@ mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead)
 
 void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
                                 struct sk_buff *skb);
+void mwifiex_wmm_add_buf_bypass_txqueue(struct mwifiex_private *priv,
+                                       struct sk_buff *skb);
 void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra);
 void mwifiex_rotate_priolists(struct mwifiex_private *priv,
                              struct mwifiex_ra_list_tbl *ra, int tid);
 
 int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter);
+int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter);
 void mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter);
+void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter);
 int mwifiex_is_ralist_valid(struct mwifiex_private *priv,
                            struct mwifiex_ra_list_tbl *ra_list, int tid);
 
@@ -126,6 +130,10 @@ struct mwifiex_ra_list_tbl *
 mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
                            const u8 *ra_addr);
 u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid);
+void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac,
+                                   u8 tx_pause);
+void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv,
+                                              u8 *mac, u8 tx_pause);
 
 struct mwifiex_ra_list_tbl *mwifiex_wmm_get_ralist_node(struct mwifiex_private
                                        *priv, u8 tid, const u8 *ra_addr);
index c410180479e694880f6a3c73561fd9ce4b670658..7b5c554323c73ab40caddb47ca965560d787d127 100644 (file)
@@ -2321,8 +2321,6 @@ void free_orinocodev(struct orinoco_private *priv)
        struct orinoco_rx_data *rx_data, *temp;
        struct orinoco_scan_data *sd, *sdtemp;
 
-       wiphy_unregister(wiphy);
-
        /* If the tasklet is scheduled when we call tasklet_kill, it
         * will run one final time. However, the tasklet will only
         * drain priv->rx_list if the hw is still available. */
index c0a27377d9e26306ea7dc32b44efc6749723bcda..a956f965a1e5ec77e19c03e86ec87dc810461315 100644 (file)
@@ -118,6 +118,7 @@ static void orinoco_cs_detach(struct pcmcia_device *link)
 
        orinoco_cs_release(link);
 
+       wiphy_unregister(priv_to_wiphy(priv));
        free_orinocodev(priv);
 }                              /* orinoco_cs_detach */
 
index 1b543e30eff7b5fe4dafaa9cd904ecec1a4ef5db..048693b6c6c24f06e701a2fa5856484c228065a6 100644 (file)
@@ -223,13 +223,15 @@ static int orinoco_nortel_init_one(struct pci_dev *pdev,
        err = orinoco_if_add(priv, 0, 0, NULL);
        if (err) {
                printk(KERN_ERR PFX "orinoco_if_add() failed\n");
-               goto fail;
+               goto fail_wiphy;
        }
 
        pci_set_drvdata(pdev, priv);
 
        return 0;
 
+ fail_wiphy:
+       wiphy_unregister(priv_to_wiphy(priv));
  fail:
        free_irq(pdev->irq, priv);
 
@@ -263,6 +265,7 @@ static void orinoco_nortel_remove_one(struct pci_dev *pdev)
        iowrite16(0, card->bridge_io + 10);
 
        orinoco_if_del(priv);
+       wiphy_unregister(priv_to_wiphy(priv));
        free_irq(pdev->irq, priv);
        free_orinocodev(priv);
        pci_iounmap(pdev, priv->hw.iobase);
index 74219d59d7e1e5d7d918b42acac99b913157a499..4938a2208a37ce72371f245c78cf9de7d3c90345 100644 (file)
@@ -173,13 +173,15 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
        err = orinoco_if_add(priv, 0, 0, NULL);
        if (err) {
                printk(KERN_ERR PFX "orinoco_if_add() failed\n");
-               goto fail;
+               goto fail_wiphy;
        }
 
        pci_set_drvdata(pdev, priv);
 
        return 0;
 
+ fail_wiphy:
+       wiphy_unregister(priv_to_wiphy(priv));
  fail:
        free_irq(pdev->irq, priv);
 
@@ -203,6 +205,7 @@ static void orinoco_pci_remove_one(struct pci_dev *pdev)
        struct orinoco_private *priv = pci_get_drvdata(pdev);
 
        orinoco_if_del(priv);
+       wiphy_unregister(priv_to_wiphy(priv));
        free_irq(pdev->irq, priv);
        free_orinocodev(priv);
        pci_iounmap(pdev, priv->hw.iobase);
index 8b045236b6e0111b8661b0df0b8e5d428710f11d..221352027779f75c5ceb0e605b15297e53b70c2f 100644 (file)
@@ -262,13 +262,15 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
        err = orinoco_if_add(priv, 0, 0, NULL);
        if (err) {
                printk(KERN_ERR PFX "orinoco_if_add() failed\n");
-               goto fail;
+               goto fail_wiphy;
        }
 
        pci_set_drvdata(pdev, priv);
 
        return 0;
 
+ fail_wiphy:
+       wiphy_unregister(priv_to_wiphy(priv));
  fail:
        free_irq(pdev->irq, priv);
 
@@ -299,6 +301,7 @@ static void orinoco_plx_remove_one(struct pci_dev *pdev)
        struct orinoco_pci_card *card = priv->card;
 
        orinoco_if_del(priv);
+       wiphy_unregister(priv_to_wiphy(priv));
        free_irq(pdev->irq, priv);
        free_orinocodev(priv);
        pci_iounmap(pdev, priv->hw.iobase);
index 91f05442de28809a662290b66c26ac42be488144..26a57d773d3031147e5985cc004b3d80937d4081 100644 (file)
@@ -1502,6 +1502,7 @@ static inline void ezusb_delete(struct ezusb_priv *upriv)
        if (upriv->dev) {
                struct orinoco_private *priv = ndev_priv(upriv->dev);
                orinoco_if_del(priv);
+               wiphy_unregister(priv_to_wiphy(priv));
                free_orinocodev(priv);
        }
 }
@@ -1695,6 +1696,7 @@ static int ezusb_probe(struct usb_interface *interface,
        if (orinoco_if_add(priv, 0, 0, &ezusb_netdev_ops) != 0) {
                upriv->dev = NULL;
                err("%s: orinoco_if_add() failed", __func__);
+               wiphy_unregister(priv_to_wiphy(priv));
                goto error;
        }
        upriv->dev = priv->ndev;
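The orinoco hunks in this series all make the same ownership change: wiphy_unregister() is no longer buried in free_orinocodev(), so each bus front end (cs, nortel, pci, plx, usb) now unregisters the wiphy on exactly the paths where it registered one, including the new fail_wiphy error labels. A minimal model of that pairing, with invented names:

    #include <stdio.h>

    /* Invented stand-ins: the bus layer owns register/unregister,
     * the core owns alloc/free, and teardown mirrors setup. */
    static void core_free(void)      { printf("core: free priv\n"); }
    static void bus_unregister(void) { printf("bus: wiphy_unregister\n"); }

    static void bus_detach(void)
    {
            bus_unregister();   /* undone in the layer that did it */
            core_free();        /* core no longer unregisters behind us */
    }

    int main(void)
    {
            bus_detach();
            return 0;
    }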
index c8058aa73ecfc1aa8f282cc80aa27b789f657906..629125658b8728678931848504be377e17849ecd 100644 (file)
@@ -200,7 +200,7 @@ int rtl88e_download_fw(struct ieee80211_hw *hw,
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       struct rtl92c_firmware_header *pfwheader;
+       struct rtlwifi_firmware_header *pfwheader;
        u8 *pfwdata;
        u32 fwsize;
        int err;
@@ -209,7 +209,7 @@ int rtl88e_download_fw(struct ieee80211_hw *hw,
        if (!rtlhal->pfirmware)
                return 1;
 
-       pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
+       pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
        pfwdata = rtlhal->pfirmware;
        fwsize = rtlhal->fwsize;
        RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
@@ -219,10 +219,10 @@ int rtl88e_download_fw(struct ieee80211_hw *hw,
                RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
                         "Firmware Version(%d), Signature(%#x), Size(%d)\n",
                          pfwheader->version, pfwheader->signature,
-                         (int)sizeof(struct rtl92c_firmware_header));
+                         (int)sizeof(struct rtlwifi_firmware_header));
 
-               pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
-               fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+               pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+               fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
        }
 
        if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) {
index 05e944e451f442f2d610502348c7e8c2c0e8098f..21bd4a5337abc633686b1d56e749f70a3b42c944 100644 (file)
@@ -37,7 +37,7 @@
 #define FW_8192C_POLLING_TIMEOUT_COUNT         3000
 
 #define IS_FW_HEADER_EXIST(_pfwhdr)            \
-       ((_pfwhdr->signature&0xFFFF) == 0x88E1)
+       ((le16_to_cpu(_pfwhdr->signature) & 0xFFFF) == 0x88E1)
 #define USE_OLD_WOWLAN_DEBUG_FW                        0
 
 #define H2C_88E_RSVDPAGE_LOC_LEN               5
 #define        FW_PWR_STATE_ACTIVE     ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
 #define        FW_PWR_STATE_RF_OFF             0
 
-struct rtl92c_firmware_header {
-       u16 signature;
-       u8 category;
-       u8 function;
-       u16 version;
-       u8 subversion;
-       u8 rsvd1;
-       u8 month;
-       u8 date;
-       u8 hour;
-       u8 minute;
-       u16 ramcodesize;
-       u16 rsvd2;
-       u32 svnindex;
-       u32 rsvd3;
-       u32 rsvd4;
-       u32 rsvd5;
-};
-
 enum rtl8188e_h2c_cmd {
        H2C_88E_RSVDPAGE = 0,
        H2C_88E_JOINBSSRPT = 1,
index 0aca6f47487c2fe293f082ec562f13172dc13c38..03cbe4cf110b5f42deabe86397fff9fa41101974 100644 (file)
@@ -39,6 +39,7 @@
 #define BT_RSSI_STATE_SPECIAL_LOW      BIT_OFFSET_LEN_MASK_32(2, 1)
 #define BT_RSSI_STATE_BG_EDCA_LOW      BIT_OFFSET_LEN_MASK_32(3, 1)
 #define BT_RSSI_STATE_TXPOWER_LOW      BIT_OFFSET_LEN_MASK_32(4, 1)
+#define BT_MASK                                0x00ffffff
 
 #define RTLPRIV                        (struct rtl_priv *)
 #define GET_UNDECORATED_AVERAGE_RSSI(_priv)    \
@@ -312,7 +313,7 @@ static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
        struct dig_t *digtable = &rtlpriv->dm_digtable;
        u32 isbt;
 
-       /* modify DIG lower bound, deal with abnorally large false alarm */
+       /* modify DIG lower bound, deal with abnormally large false alarm */
        if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
                digtable->large_fa_hit++;
                if (digtable->forbidden_igi < digtable->cur_igvalue) {
@@ -1536,13 +1537,11 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
                return false;
 
        bt_state = rtl_read_byte(rtlpriv, 0x4fd);
-       bt_tx = rtl_read_dword(rtlpriv, 0x488);
-       bt_tx = bt_tx & 0x00ffffff;
-       bt_pri = rtl_read_dword(rtlpriv, 0x48c);
-       bt_pri = bt_pri & 0x00ffffff;
+       bt_tx = rtl_read_dword(rtlpriv, 0x488) & BT_MASK;
+       bt_pri = rtl_read_dword(rtlpriv, 0x48c) & BT_MASK;
        polling = rtl_read_dword(rtlpriv, 0x490);
 
-       if (bt_tx == 0xffffffff && bt_pri == 0xffffffff &&
+       if (bt_tx == BT_MASK && bt_pri == BT_MASK &&
            polling == 0xffffffff && bt_state == 0xff)
                return false;
 
index 14b819ea8b71886cc6e6b46ec949e5ce1a87c39d..43fcb25c885f15d691b6ddb1300599cd0cf6b347 100644 (file)
@@ -221,7 +221,7 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       struct rtl92c_firmware_header *pfwheader;
+       struct rtlwifi_firmware_header *pfwheader;
        u8 *pfwdata;
        u32 fwsize;
        int err;
@@ -230,19 +230,19 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
        if (!rtlhal->pfirmware)
                return 1;
 
-       pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
+       pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
        pfwdata = (u8 *)rtlhal->pfirmware;
        fwsize = rtlhal->fwsize;
        if (IS_FW_HEADER_EXIST(pfwheader)) {
                RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
                         "Firmware Version(%d), Signature(%#x),Size(%d)\n",
                          pfwheader->version, pfwheader->signature,
-                         (int)sizeof(struct rtl92c_firmware_header));
+                         (int)sizeof(struct rtlwifi_firmware_header));
 
-               rtlhal->fw_version = pfwheader->version;
+               rtlhal->fw_version = le16_to_cpu(pfwheader->version);
                rtlhal->fw_subversion = pfwheader->subversion;
-               pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
-               fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+               pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+               fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
        }
 
        _rtl92c_enable_fw_download(hw, true);
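The firmware-header rework above replaces the per-chip rtl92c_firmware_header copies with the common rtlwifi_firmware_header and treats its multi-byte fields as little-endian on the wire, hence the new le16_to_cpu() around the signature check and version read. A portable read of such a header, with the layout trimmed down for the sketch:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t le16_to_host(const uint8_t *p)
    {
            return (uint16_t)(p[0] | (p[1] << 8));
    }

    int main(void)
    {
            /* Assumed start of an image: LE signature 0x88E1, two
             * one-byte fields, then an LE version. */
            const uint8_t fw[] = { 0xe1, 0x88, 0x00, 0x00, 0x2a, 0x00 };

            if (le16_to_host(&fw[0]) == 0x88E1)
                    printf("header found, version %u\n",
                           (unsigned)le16_to_host(&fw[4]));
            return 0;
    }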
index e9f4281f5067d9554ea22985ce9cc463a162a8e7..864806c19ca7b21043de97cdbbe79c9763b289b6 100644 (file)
        ((GET_CVID_CUT_VERSION(version) == \
                CHIP_VENDOR_UMC_B_CUT) ? true : false) : false)
 
-struct rtl92c_firmware_header {
-       __le16 signature;
-       u8 category;
-       u8 function;
-       __le16 version;
-       u8 subversion;
-       u8 rsvd1;
-       u8 month;
-       u8 date;
-       u8 hour;
-       u8 minute;
-       __le16 ramcodeSize;
-       __le16 rsvd2;
-       __le32 svnindex;
-       __le32 rsvd3;
-       __le32 rsvd4;
-       __le32 rsvd5;
-};
-
 #define pagenum_128(_len)      (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
 
 #define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val)                 \
index c940a87175ca15d701bad48abbe2be5dd755091c..74a479ac323d53716e5e9b02f7d179d4370ae5c5 100644 (file)
 /*-------------------------------------------------------------------------
  *     Chip specific
  *-------------------------------------------------------------------------*/
-#define CHIP_8723                      BIT(2) /* RTL8723 With BT feature */
-#define CHIP_8723_DRV_REV              BIT(3) /* RTL8723 Driver Revised */
 #define NORMAL_CHIP                    BIT(4)
 #define CHIP_VENDOR_UMC                        BIT(5)
 #define CHIP_VENDOR_UMC_B_CUT          BIT(6)
 
-#define IS_8723_SERIES(version)                \
-       (((version) & CHIP_8723) ? true : false)
-
 #define IS_92C_1T2R(version)           \
        (((version) & CHIP_92C) && ((version) & CHIP_92C_1T2R))
 
 #define IS_VENDOR_UMC(version)         \
        (((version) & CHIP_VENDOR_UMC) ? true : false)
 
-#define IS_VENDOR_8723_A_CUT(version)  \
-       (((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6))) ? \
-       false : true) : false)
-
 #define CHIP_BONDING_92C_1T2R  0x1
 #define CHIP_BONDING_IDENTIFIER(_value)        (((_value) >> 22) & 0x3)
index 767358a553fb083dcc39297fe6f7ef6be74cbad4..7cf36619f25005e4395251ba663b2b15cd14e41a 100644 (file)
@@ -2280,7 +2280,6 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
        u8 u1tmp = 0;
        bool actuallyset = false;
@@ -2357,20 +2356,7 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
                if (ppsc->pwrdown_mode && e_rfpowerstate_toset == ERFOFF) {
                        /* Enable register area 0x0-0xc. */
                        rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0);
-                       if (IS_HARDWARE_TYPE_8723U(rtlhal)) {
-                               /*
-                                * We should configure HW PDn source for WiFi
-                                * ONLY, and then our HW will be set in
-                                * power-down mode if PDn source from all
-                                * functions are configured.
-                                */
-                               u1tmp = rtl_read_byte(rtlpriv,
-                                                     REG_MULTI_FUNC_CTRL);
-                               rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL,
-                                              (u1tmp|WL_HWPDN_EN));
-                       } else {
-                               rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812);
-                       }
+                       rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812);
                }
                if (e_rfpowerstate_toset == ERFOFF) {
                        if (ppsc->reg_rfps_level  & RT_RF_OFF_LEVL_ASPM)
index 490a7cf7c702fa263b0f392b2ed103d0c4df6848..1c55a002d4bd9d9cc48986483a29cbfd5228b0ab 100644 (file)
@@ -69,8 +69,6 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
                chip_version = NORMAL_CHIP;
                chip_version |= ((value32 & TYPE_ID) ? CHIP_92C : 0);
                chip_version |= ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0);
-               /* RTL8723 with BT function. */
-               chip_version |= ((value32 & BT_FUNC) ? CHIP_8723 : 0);
                if (IS_VENDOR_UMC(chip_version))
                        chip_version |= ((value32 & CHIP_VER_RTL_MASK) ?
                                         CHIP_VENDOR_UMC_B_CUT : 0);
@@ -78,10 +76,6 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
                        value32 = rtl_read_dword(rtlpriv, REG_HPON_FSM);
                        chip_version |= ((CHIP_BONDING_IDENTIFIER(value32) ==
                                 CHIP_BONDING_92C_1T2R) ? CHIP_92C_1T2R : 0);
-               } else if (IS_8723_SERIES(chip_version)) {
-                       value32 = rtl_read_dword(rtlpriv, REG_GPIO_OUTSTS);
-                       chip_version |= ((value32 & RF_RL_ID) ?
-                                         CHIP_8723_DRV_REV : 0);
                }
        }
        rtlhal->version  = (enum version_8192c)chip_version;
@@ -114,12 +108,6 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
        case VERSION_NORMAL_UMC_CHIP_88C_B_CUT:
                versionid = "NORMAL_UMC_CHIP_88C_B_CUT";
                break;
-       case VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT:
-               versionid = "NORMAL_UMC_CHIP_8723_1T1R_A_CUT";
-               break;
-       case VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT:
-               versionid = "NORMAL_UMC_CHIP_8723_1T1R_B_CUT";
-               break;
        case VERSION_TEST_CHIP_92C:
                versionid = "TEST_CHIP_92C";
                break;
index 587b8c505a7625e11b810fefc0b63af3367708e8..7c1db7e7572dc3fa44d6f7849250f537206475e1 100644 (file)
@@ -420,7 +420,7 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
                 "dm_DIG() Before: Recover_cnt=%d, rx_gain_min=%x\n",
                 de_digtable->recover_cnt, de_digtable->rx_gain_min);
 
-       /* deal with abnorally large false alarm */
+       /* deal with abnormally large false alarm */
        if (falsealm_cnt->cnt_all > 10000) {
                RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
                         "dm_DIG(): Abnormally false alarm case\n");
index 1646e7c3d0f8cfe35e584697cb629cfcc1b7da72..8a38daa316cb502164634e9d58d8af507e2eaef7 100644 (file)
 #define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val)    \
        SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 2, 0, 8, __val)
 
-struct rtl92d_firmware_header {
-       u16 signature;
-       u8 category;
-       u8 function;
-       u16 version;
-       u8 subversion;
-       u8 rsvd1;
-
-       u8 month;
-       u8 date;
-       u8 hour;
-       u8 minute;
-       u16 ramcodeSize;
-       u16 rsvd2;
-
-       u32 svnindex;
-       u32 rsvd3;
-
-       u32 rsvd4;
-       u32 rsvd5;
-};
-
 int rtl92d_download_fw(struct ieee80211_hw *hw);
 void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
                         u32 cmd_len, u8 *p_cmdbuffer);
index 1961b8e28dc16eb7261bc267641cf64b06791b77..bb06fe836fe753037925bf18dcfa6f8d4186ffb8 100644 (file)
@@ -3515,14 +3515,14 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
        for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
             rfpath++) {
                if (rtlhal->current_bandtype == BAND_ON_2_4G) {
-                       /* MOD_AG for RF paht_A 0x18 BIT8,BIT16 */
+                       /* MOD_AG for RF path_A 0x18 BIT8,BIT16 */
                        rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(8) | BIT(16) |
                                      BIT(18), 0);
                        /* RF0x0b[16:14] =3b'111 */
                        rtl_set_rfreg(hw, (enum radio_path)rfpath, 0x0B,
                                      0x1c000, 0x07);
                } else {
-                       /* MOD_AG for RF paht_A 0x18 BIT8,BIT16 */
+                       /* MOD_AG for RF path_A 0x18 BIT8,BIT16 */
                        rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(8) |
                                      BIT(16) | BIT(18),
                                      (BIT(16) | BIT(8)) >> 8);
index 232865cc3ffdd01a7672d8e4cf38203f33a8285d..0708eedd967132fc947e7b4b79aab22daafd3e10 100644 (file)
@@ -198,7 +198,7 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       struct rtl92c_firmware_header *pfwheader;
+       struct rtlwifi_firmware_header *pfwheader;
        u8 *pfwdata;
        u32 fwsize;
        int err;
@@ -207,8 +207,8 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
        if (!rtlhal->pfirmware)
                return 1;
 
-       pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
-       rtlhal->fw_version = pfwheader->version;
+       pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
+       rtlhal->fw_version = le16_to_cpu(pfwheader->version);
        rtlhal->fw_subversion = pfwheader->subversion;
        pfwdata = (u8 *)rtlhal->pfirmware;
        fwsize = rtlhal->fwsize;
@@ -219,10 +219,10 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
                RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
                         "Firmware Version(%d), Signature(%#x),Size(%d)\n",
                          pfwheader->version, pfwheader->signature,
-                         (int)sizeof(struct rtl92c_firmware_header));
+                         (int)sizeof(struct rtlwifi_firmware_header));
 
-               pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
-               fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+               pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+               fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
        } else {
                RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
                         "Firmware no Header, Signature(%#x)\n",
index 3e2a48e5fb4deffb1ad2a5f7b78ed3050aef4fe9..069da1e7e80a481fbd17179917fdca018536a77d 100644 (file)
@@ -33,7 +33,7 @@
 #define FW_8192C_POLLING_TIMEOUT_COUNT         3000
 
 #define IS_FW_HEADER_EXIST(_pfwhdr)    \
-       ((_pfwhdr->signature&0xFFF0) == 0x92E0)
+       ((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x92E0)
 #define USE_OLD_WOWLAN_DEBUG_FW 0
 
 #define H2C_92E_RSVDPAGE_LOC_LEN               5
 #define        FW_PWR_STATE_ACTIVE     ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
 #define        FW_PWR_STATE_RF_OFF     0
 
-struct rtl92c_firmware_header {
-       u16 signature;
-       u8 category;
-       u8 function;
-       u16 version;
-       u8 subversion;
-       u8 rsvd1;
-       u8 month;
-       u8 date;
-       u8 hour;
-       u8 minute;
-       u16 ramcodesize;
-       u16 rsvd2;
-       u32 svnindex;
-       u32 rsvd3;
-       u32 rsvd4;
-       u32 rsvd5;
-};
-
 enum rtl8192e_h2c_cmd {
        H2C_92E_RSVDPAGE = 0,
        H2C_92E_MSRRPT = 1,
index 8280bab43df4ce47755a1ab545e8dc2cf5aad086..3859b3e3d158d5e5dde4914073320283a2455dff 100644 (file)
@@ -205,9 +205,9 @@ bool rtl8723e_get_btc_status(void)
        return true;
 }
 
-static bool is_fw_header(struct rtl8723e_firmware_header *hdr)
+static bool is_fw_header(struct rtlwifi_firmware_header *hdr)
 {
-       return (hdr->signature & 0xfff0) == 0x2300;
+       return (le16_to_cpu(hdr->signature) & 0xfff0) == 0x2300;
 }
 
 static struct rtl_hal_ops rtl8723e_hal_ops = {
index 1017f02d7bf7520868b25330493822e08f7826a0..3ba1e3218ed2957913a424cd06e6cb85038e2066 100644 (file)
@@ -209,9 +209,9 @@ bool rtl8723be_get_btc_status(void)
        return true;
 }
 
-static bool is_fw_header(struct rtl8723e_firmware_header *hdr)
+static bool is_fw_header(struct rtlwifi_firmware_header *hdr)
 {
-       return (hdr->signature & 0xfff0) == 0x5300;
+       return (le16_to_cpu(hdr->signature) & 0xfff0) == 0x5300;
 }
 
 static struct rtl_hal_ops rtl8723be_hal_ops = {
index dd698e7e9aceffb90c1e5281f6da123f6635fc56..a2f5e89bedfed333b6f157e431b61239bd6c593c 100644 (file)
@@ -253,7 +253,7 @@ int rtl8723_download_fw(struct ieee80211_hw *hw,
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       struct rtl8723e_firmware_header *pfwheader;
+       struct rtlwifi_firmware_header *pfwheader;
        u8 *pfwdata;
        u32 fwsize;
        int err;
@@ -263,7 +263,7 @@ int rtl8723_download_fw(struct ieee80211_hw *hw,
        if (!rtlhal->pfirmware)
                return 1;
 
-       pfwheader = (struct rtl8723e_firmware_header *)rtlhal->pfirmware;
+       pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
        pfwdata = rtlhal->pfirmware;
        fwsize = rtlhal->fwsize;
 
@@ -275,10 +275,10 @@ int rtl8723_download_fw(struct ieee80211_hw *hw,
                RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
                         "Firmware Version(%d), Signature(%#x), Size(%d)\n",
                         pfwheader->version, pfwheader->signature,
-                        (int)sizeof(struct rtl8723e_firmware_header));
+                        (int)sizeof(struct rtlwifi_firmware_header));
 
-               pfwdata = pfwdata + sizeof(struct rtl8723e_firmware_header);
-               fwsize = fwsize - sizeof(struct rtl8723e_firmware_header);
+               pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+               fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
        }
 
        if (rtl_read_byte(rtlpriv, REG_MCUFWDL)&BIT(7)) {
index 3ebafc80972fc1a3f5aae3f9071e96749ba966d2..8ea372d1626e5e75432c363f1b592df04a710817 100644 (file)
@@ -50,25 +50,6 @@ enum version_8723e {
        VERSION_UNKNOWN = 0xFF,
 };
 
-struct rtl8723e_firmware_header {
-       u16 signature;
-       u8 category;
-       u8 function;
-       u16 version;
-       u8 subversion;
-       u8 rsvd1;
-       u8 month;
-       u8 date;
-       u8 hour;
-       u8 minute;
-       u16 ramcodesize;
-       u16 rsvd2;
-       u32 svnindex;
-       u32 rsvd3;
-       u32 rsvd4;
-       u32 rsvd5;
-};
-
 enum rtl8723be_cmd {
        H2C_8723BE_RSVDPAGE = 0,
        H2C_8723BE_JOINBSSRPT = 1,
index 95e95626b6325c29c43ceb440b4b3ea0b352874e..525eb234627c965daa3d0c7e9611163b9734df3a 100644 (file)
@@ -210,7 +210,7 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       struct rtl8821a_firmware_header *pfwheader;
+       struct rtlwifi_firmware_header *pfwheader;
        u8 *pfwdata;
        u32 fwsize;
        int err;
@@ -228,8 +228,8 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
                        return 1;
 
                pfwheader =
-                 (struct rtl8821a_firmware_header *)rtlhal->wowlan_firmware;
-               rtlhal->fw_version = pfwheader->version;
+                 (struct rtlwifi_firmware_header *)rtlhal->wowlan_firmware;
+               rtlhal->fw_version = le16_to_cpu(pfwheader->version);
                rtlhal->fw_subversion = pfwheader->subversion;
                pfwdata = (u8 *)rtlhal->wowlan_firmware;
                fwsize = rtlhal->wowlan_fwsize;
@@ -238,8 +238,8 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
                        return 1;
 
                pfwheader =
-                 (struct rtl8821a_firmware_header *)rtlhal->pfirmware;
-               rtlhal->fw_version = pfwheader->version;
+                 (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
+               rtlhal->fw_version = le16_to_cpu(pfwheader->version);
                rtlhal->fw_subversion = pfwheader->subversion;
                pfwdata = (u8 *)rtlhal->pfirmware;
                fwsize = rtlhal->fwsize;
@@ -255,8 +255,8 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
                         "Firmware Version(%d), Signature(%#x)\n",
                         pfwheader->version, pfwheader->signature);
 
-               pfwdata = pfwdata + sizeof(struct rtl8821a_firmware_header);
-               fwsize = fwsize - sizeof(struct rtl8821a_firmware_header);
+               pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+               fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
        }
 
        if (rtlhal->mac_func_enable) {
index 591c14c0b9b52bd9247676cf972b18853874d07a..8f5b4aade3c91f356748dffa41318ce557c10bd5 100644 (file)
 #define FW_8821AE_POLLING_TIMEOUT_COUNT        6000
 
 #define IS_FW_HEADER_EXIST_8812(_pfwhdr)       \
-       ((_pfwhdr->signature&0xFFF0) == 0x9500)
+       ((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x9500)
 
 #define IS_FW_HEADER_EXIST_8821(_pfwhdr)       \
-       ((_pfwhdr->signature&0xFFF0) == 0x2100)
+       ((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x2100)
 
 #define USE_OLD_WOWLAN_DEBUG_FW 0
 
 #define        FW_PWR_STATE_ACTIVE     ((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
 #define        FW_PWR_STATE_RF_OFF     0
 
-struct rtl8821a_firmware_header {
-       u16 signature;
-       u8 category;
-       u8 function;
-       u16 version;
-       u8 subversion;
-       u8 rsvd1;
-       u8 month;
-       u8 date;
-       u8 hour;
-       u8 minute;
-       u16 ramcodeSize;
-       u16 rsvd2;
-       u32 svnindex;
-       u32 rsvd3;
-       u32 rsvd4;
-       u32 rsvd5;
-};
-
 enum rtl8812_c2h_evt {
        C2H_8812_DBG = 0,
        C2H_8812_LB = 1,
index 3236d44b459df69efd4f2b363f4b651e478a10a4..b7f18e2155eb18358cf4d4f9f3f82774f9b6f522 100644 (file)
@@ -2180,7 +2180,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
 
        rtl_write_byte(rtlpriv, MSR, bt_msr);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
-       if ((bt_msr & 0xfc) == MSR_AP)
+       if ((bt_msr & MSR_MASK) == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
        else
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
index 53668fc8f23e211dab03085359ce013fc65b81c2..1d6110f9c1fb6e29e75e8bde9c1533b026a8b757 100644 (file)
 #define        MSR_ADHOC                               0x01
 #define        MSR_INFRA                               0x02
 #define        MSR_AP                                  0x03
+#define MSR_MASK                               0x03
 
 #define        RRSR_RSC_OFFSET                         21
 #define        RRSR_SHORT_OFFSET                       23
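The hunk above fixes a mask bug: the network-type field occupies only the two low bits of MSR, so the old (bt_msr & 0xfc) == MSR_AP comparison could never be true. A standalone illustration, reusing the constants defined above:

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_ADHOC 0x01
    #define MSR_INFRA 0x02
    #define MSR_AP    0x03
    #define MSR_MASK  0x03  /* the mode lives in bits 1:0 */

    static bool is_ap_mode(uint8_t bt_msr)
    {
            /* the old code masked with 0xfc, clearing the very bits that
             * encode the mode, so the comparison was always false */
            return (bt_msr & MSR_MASK) == MSR_AP;
    }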
index 2b770b5e2620fc629ea2eca7ef1b499b300eaf8d..b90ca618b123209a1724bc808c4fdfe8ea330a69 100644 (file)
@@ -222,6 +222,25 @@ enum rf_tx_num {
 #define        WOL_REASON_REALWOW_V2_WAKEUPPKT BIT(9)
 #define        WOL_REASON_REALWOW_V2_ACKLOST   BIT(10)
 
+struct rtlwifi_firmware_header {
+       __le16 signature;
+       u8 category;
+       u8 function;
+       __le16 version;
+       u8 subversion;
+       u8 rsvd1;
+       u8 month;
+       u8 date;
+       u8 hour;
+       u8 minute;
+       __le16 ramcodeSize;
+       __le16 rsvd2;
+       __le32 svnindex;
+       __le32 rsvd3;
+       __le32 rsvd4;
+       __le32 rsvd5;
+};
+
 struct txpower_info_2g {
        u8 index_cck_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
        u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
@@ -2064,16 +2083,12 @@ struct rtl_tcb_desc {
        bool tx_enable_sw_calc_duration;
 };
 
-struct rtl92c_firmware_header;
-
 struct rtl_wow_pattern {
        u8 type;
        u16 crc;
        u32 mask[4];
 };
 
-struct rtl8723e_firmware_header;
-
 struct rtl_hal_ops {
        int (*init_sw_vars) (struct ieee80211_hw *hw);
        void (*deinit_sw_vars) (struct ieee80211_hw *hw);
@@ -2177,7 +2192,7 @@ struct rtl_hal_ops {
        void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id,
                              u32 cmd_len, u8 *p_cmdbuffer);
        bool (*get_btc_status) (void);
-       bool (*is_fw_header)(struct rtl8723e_firmware_header *hdr);
+       bool (*is_fw_header)(struct rtlwifi_firmware_header *hdr);
        u32 (*rx_command_packet)(struct ieee80211_hw *hw,
                                 struct rtl_stats status, struct sk_buff *skb);
        void (*add_wowlan_pattern)(struct ieee80211_hw *hw,
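Every per-chip download routine touched in this series repeats the same pattern once the shared header struct exists: detect the header, record version and subversion, then advance the data pointer and shrink the size by sizeof(struct rtlwifi_firmware_header), which is 32 bytes for the layout above. A compact sketch of that pattern with illustrative names:

    #include <stddef.h>
    #include <stdint.h>

    #define FW_HEADER_LEN 32  /* sizeof(struct rtlwifi_firmware_header) */

    struct fw_image {
            const uint8_t *data;
            size_t size;
    };

    /* Skip the optional header before handing the image to the download
     * engine; header_present mirrors the IS_FW_HEADER_EXIST() checks. */
    static void fw_skip_header(struct fw_image *fw, int header_present)
    {
            if (header_present && fw->size >= FW_HEADER_LEN) {
                    fw->data += FW_HEADER_LEN;
                    fw->size -= FW_HEADER_LEN;
            }
    }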
index 0c0d5cd98514207c25b3faf804573f13af78976e..7c355fff2c5ea50616407545639b32aa7d558d8f 100644 (file)
@@ -118,7 +118,11 @@ static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        if (passive)
                scan_options |= WL1271_SCAN_OPT_PASSIVE;
 
-       cmd->params.role_id = wlvif->role_id;
+       /* scan on the dev role if the regular one is not started */
+       if (wlcore_is_p2p_mgmt(wlvif))
+               cmd->params.role_id = wlvif->dev_role_id;
+       else
+               cmd->params.role_id = wlvif->role_id;
 
        if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) {
                ret = -EINVAL;
index 67f2a0eec8542190ea6d8c374240492d9b95ed73..4be0409308cb6f2a9d315b7ed77d2150f65ad6f0 100644 (file)
@@ -282,3 +282,30 @@ out:
        kfree(acx);
        return ret;
 }
+
+int wl18xx_acx_dynamic_fw_traces(struct wl1271 *wl)
+{
+       struct acx_dynamic_fw_traces_cfg *acx;
+       int ret;
+
+       wl1271_debug(DEBUG_ACX, "acx dynamic fw traces config %d",
+                    wl->dynamic_fw_traces);
+
+       acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+       if (!acx) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       acx->dynamic_fw_traces = cpu_to_le32(wl->dynamic_fw_traces);
+
+       ret = wl1271_cmd_configure(wl, ACX_DYNAMIC_TRACES_CFG,
+                                  acx, sizeof(*acx));
+       if (ret < 0) {
+               wl1271_warning("acx config dynamic fw traces failed: %d", ret);
+               goto out;
+       }
+out:
+       kfree(acx);
+       return ret;
+}
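The new helper follows the driver's usual allocate/configure/free shape: one buffer, one exit path that frees it whether wl1271_cmd_configure() succeeded or not. A standalone sketch of the idiom; the send callback and error value are stand-ins, not wlcore API:

    #include <stdlib.h>

    static int configure_u32(int (*send)(const void *, unsigned long),
                             unsigned int value)
    {
            unsigned int *buf;
            int ret;

            buf = calloc(1, sizeof(*buf));
            if (!buf)
                    return -1;      /* -ENOMEM in the kernel version */

            *buf = value;           /* the driver stores this as __le32 */
            ret = send(buf, sizeof(*buf));

            free(buf);              /* single cleanup path, like the out: label */
            return ret;
    }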
index 4afccd4b946752b6312bc2301ab13e02c14944da..c8a33f43916ef2dffe116229e661985a152ecc47 100644 (file)
@@ -35,7 +35,8 @@ enum {
        ACX_PEER_CAP                     = 0x0056,
        ACX_INTERRUPT_NOTIFY             = 0x0057,
        ACX_RX_BA_FILTER                 = 0x0058,
-       ACX_AP_SLEEP_CFG                 = 0x0059
+       ACX_AP_SLEEP_CFG                 = 0x0059,
+       ACX_DYNAMIC_TRACES_CFG           = 0x005A,
 };
 
 /* numbers of bits the length field takes (add 1 for the actual number) */
@@ -367,6 +368,15 @@ struct acx_ap_sleep_cfg {
        u8 idle_conn_thresh;
 } __packed;
 
+/*
+ * ACX_DYNAMIC_TRACES_CFG
+ * Configure the FW dynamic traces bitmap.
+ */
+struct acx_dynamic_fw_traces_cfg {
+       struct acx_header header;
+       __le32 dynamic_fw_traces;
+} __packed;
+
 int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
                                  u32 sdio_blk_size, u32 extra_mem_blks,
                                  u32 len_field_size);
@@ -380,5 +390,6 @@ int wl18xx_acx_set_peer_cap(struct wl1271 *wl,
 int wl18xx_acx_interrupt_notify_config(struct wl1271 *wl, bool action);
 int wl18xx_acx_rx_ba_filter(struct wl1271 *wl, bool action);
 int wl18xx_acx_ap_sleep(struct wl1271 *wl);
+int wl18xx_acx_dynamic_fw_traces(struct wl1271 *wl);
 
 #endif /* __WL18XX_ACX_H__ */
index 5fbd2230f372f2a17d763be90efa3228bca5bdbd..8c6a1c86f526981ba3f947f0948fd3acb46d9e6f 100644 (file)
@@ -281,6 +281,55 @@ static const struct file_operations radar_detection_ops = {
        .llseek = default_llseek,
 };
 
+static ssize_t dynamic_fw_traces_write(struct file *file,
+                                       const char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct wl1271 *wl = file->private_data;
+       unsigned long value;
+       int ret;
+
+       ret = kstrtoul_from_user(user_buf, count, 0, &value);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&wl->mutex);
+
+       wl->dynamic_fw_traces = value;
+
+       if (unlikely(wl->state != WLCORE_STATE_ON))
+               goto out;
+
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out;
+
+       ret = wl18xx_acx_dynamic_fw_traces(wl);
+       if (ret < 0)
+               count = ret;
+
+       wl1271_ps_elp_sleep(wl);
+out:
+       mutex_unlock(&wl->mutex);
+       return count;
+}
+
+static ssize_t dynamic_fw_traces_read(struct file *file,
+                                       char __user *userbuf,
+                                       size_t count, loff_t *ppos)
+{
+       struct wl1271 *wl = file->private_data;
+       return wl1271_format_buffer(userbuf, count, ppos,
+                                   "%d\n", wl->dynamic_fw_traces);
+}
+
+static const struct file_operations dynamic_fw_traces_ops = {
+       .read = dynamic_fw_traces_read,
+       .write = dynamic_fw_traces_write,
+       .open = simple_open,
+       .llseek = default_llseek,
+};
+
 int wl18xx_debugfs_add_files(struct wl1271 *wl,
                             struct dentry *rootdir)
 {
@@ -433,6 +482,7 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl,
 
        DEBUGFS_ADD(conf, moddir);
        DEBUGFS_ADD(radar_detection, moddir);
+       DEBUGFS_ADD(dynamic_fw_traces, moddir);
 
        return 0;
 
index 548bb9e7e91ec054ad7da9022d325e42b007a4cf..09c7e098f4607bd6cb6d0b0f89654f63bcf6de74 100644 (file)
@@ -112,6 +112,14 @@ static int wlcore_smart_config_decode_event(struct wl1271 *wl,
        return 0;
 }
 
+static void wlcore_event_time_sync(struct wl1271 *wl, u16 tsf_msb, u16 tsf_lsb)
+{
+       u32 clock;
+       /* convert the MSB+LSB to a u32 TSF value */
+       clock = (tsf_msb << 16) | tsf_lsb;
+       wl1271_info("TIME_SYNC_EVENT_ID: clock %u", clock);
+}
+
 int wl18xx_process_mailbox_events(struct wl1271 *wl)
 {
        struct wl18xx_event_mailbox *mbox = wl->mbox;
@@ -128,6 +136,11 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl)
                        wl18xx_scan_completed(wl, wl->scan_wlvif);
        }
 
+       if (vector & TIME_SYNC_EVENT_ID)
+               wlcore_event_time_sync(wl,
+                               mbox->time_sync_tsf_msb,
+                               mbox->time_sync_tsf_lsb);
+
        if (vector & RADAR_DETECTED_EVENT_ID) {
                wl1271_info("radar event: channel %d type %s",
                            mbox->radar_channel,
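wlcore_event_time_sync() above recombines the 32-bit TSF from the two 16-bit mailbox fields. A worked example: tsf_msb = 0x0001 and tsf_lsb = 0x86A0 give (0x0001 << 16) | 0x86A0 = 0x000186A0 = 100000. A standalone sketch:

    #include <stdint.h>

    static uint32_t tsf_from_halves(uint16_t tsf_msb, uint16_t tsf_lsb)
    {
            return ((uint32_t)tsf_msb << 16) | tsf_lsb;
    }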
index 266ee87834e4332e9fe87abdc6fb172e58db8d8c..f3d4f13379cb0dcd1846119d27941c8301167dac 100644 (file)
@@ -38,8 +38,9 @@ enum {
        REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID      = BIT(18),
        DFS_CHANNELS_CONFIG_COMPLETE_EVENT       = BIT(19),
        PERIODIC_SCAN_REPORT_EVENT_ID            = BIT(20),
-       SMART_CONFIG_SYNC_EVENT_ID               = BIT(22),
-       SMART_CONFIG_DECODE_EVENT_ID             = BIT(23),
+       SMART_CONFIG_SYNC_EVENT_ID               = BIT(22),
+       SMART_CONFIG_DECODE_EVENT_ID             = BIT(23),
+       TIME_SYNC_EVENT_ID                       = BIT(24),
 };
 
 enum wl18xx_radar_types {
@@ -95,13 +96,16 @@ struct wl18xx_event_mailbox {
        /* smart config sync channel */
        u8 sc_sync_channel;
        u8 sc_sync_band;
-       u8 padding2[2];
 
+       /* time sync msb */
+       u16 time_sync_tsf_msb;
        /* radar detect */
        u8 radar_channel;
        u8 radar_type;
 
-       u8 padding3[2];
+       /* time sync lsb */
+       u16 time_sync_tsf_lsb;
+
 } __packed;
 
 int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
index 49aca2cf76050200771705ecb6c535ba1eee804e..abbf054fb6da892bee387e18345c41e842c01bd2 100644 (file)
@@ -422,6 +422,8 @@ static struct wlcore_conf wl18xx_conf = {
                .num_probe_reqs                 = 2,
                .rssi_threshold                 = -90,
                .snr_threshold                  = 0,
+               .num_short_intervals            = SCAN_MAX_SHORT_INTERVALS,
+               .long_interval                  = 30000,
        },
        .ht = {
                .rx_ba_win_size = 32,
@@ -1026,8 +1028,8 @@ static int wl18xx_boot(struct wl1271 *wl)
                CHANNEL_SWITCH_COMPLETE_EVENT_ID |
                DFS_CHANNELS_CONFIG_COMPLETE_EVENT |
                SMART_CONFIG_SYNC_EVENT_ID |
-               SMART_CONFIG_DECODE_EVENT_ID;
-;
+               SMART_CONFIG_DECODE_EVENT_ID |
+               TIME_SYNC_EVENT_ID;
 
        wl->ap_event_mask = MAX_TX_FAILURE_EVENT_ID;
 
@@ -1159,6 +1161,11 @@ static int wl18xx_hw_init(struct wl1271 *wl)
        if (ret < 0)
                return ret;
 
+       /* set the dynamic fw traces bitmap */
+       ret = wl18xx_acx_dynamic_fw_traces(wl);
+       if (ret < 0)
+               return ret;
+
        if (checksum_param) {
                ret = wl18xx_acx_set_checksum_state(wl);
                if (ret != 0)
@@ -1797,7 +1804,7 @@ static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = {
 
 static const struct ieee80211_iface_limit wl18xx_iface_limits[] = {
        {
-               .max = 3,
+               .max = 2,
                .types = BIT(NL80211_IFTYPE_STATION),
        },
        {
@@ -1806,6 +1813,10 @@ static const struct ieee80211_iface_limit wl18xx_iface_limits[] = {
                         BIT(NL80211_IFTYPE_P2P_GO) |
                         BIT(NL80211_IFTYPE_P2P_CLIENT),
        },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+       },
 };
 
 static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = {
@@ -1813,6 +1824,48 @@ static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = {
                .max = 2,
                .types = BIT(NL80211_IFTYPE_AP),
        },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+       },
+};
+
+static const struct ieee80211_iface_limit wl18xx_iface_ap_cl_limits[] = {
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_STATION),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_AP),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+       },
+};
+
+static const struct ieee80211_iface_limit wl18xx_iface_ap_go_limits[] = {
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_STATION),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_AP),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_P2P_GO),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+       },
 };
 
 static const struct ieee80211_iface_combination
index 98666f235a12d9a70873e18eaa3245298007284f..c938c494c785703b6f3b4a6a1fed48530028627c 100644 (file)
@@ -51,7 +51,11 @@ static int wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                goto out;
        }
 
-       cmd->role_id = wlvif->role_id;
+       /* scan on the dev role if the regular one is not started */
+       if (wlcore_is_p2p_mgmt(wlvif))
+               cmd->role_id = wlvif->dev_role_id;
+       else
+               cmd->role_id = wlvif->role_id;
 
        if (WARN_ON(cmd->role_id == WL12XX_INVALID_ROLE_ID)) {
                ret = -EINVAL;
@@ -223,9 +227,20 @@ int wl18xx_scan_sched_scan_config(struct wl1271 *wl,
                                    SCAN_TYPE_PERIODIC);
        wl18xx_adjust_channels(cmd, cmd_channels);
 
-       cmd->short_cycles_sec = 0;
-       cmd->long_cycles_sec = cpu_to_le16(req->interval);
-       cmd->short_cycles_count = 0;
+       if (c->num_short_intervals && c->long_interval &&
+           c->long_interval > req->interval) {
+               cmd->short_cycles_msec = cpu_to_le16(req->interval);
+               cmd->long_cycles_msec = cpu_to_le16(c->long_interval);
+               cmd->short_cycles_count = c->num_short_intervals;
+       } else {
+               cmd->short_cycles_msec = 0;
+               cmd->long_cycles_msec = cpu_to_le16(req->interval);
+               cmd->short_cycles_count = 0;
+       }
+       wl1271_debug(DEBUG_SCAN, "short_interval: %d, long_interval: %d, num_short: %d",
+                    le16_to_cpu(cmd->short_cycles_msec),
+                    le16_to_cpu(cmd->long_cycles_msec),
+                    cmd->short_cycles_count);
 
        cmd->total_cycles = 0;
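The interval selection above works as follows: with the wl18xx defaults added in this series (num_short_intervals = 14, long_interval = 30000 ms) and a requested interval of 10000 ms, the firmware scans every 10 s for 14 cycles and then drops to every 30 s; if the requested interval already reaches long_interval, only long cycles are used. A host-order sketch of the decision (the real command stores __le16 fields):

    #include <stdint.h>

    struct scan_plan {
            uint16_t short_cycles_msec;
            uint16_t long_cycles_msec;
            uint8_t  short_cycles_count;
    };

    static struct scan_plan pick_plan(uint16_t req_interval,
                                      uint16_t long_interval,
                                      uint8_t num_short)
    {
            struct scan_plan p;

            if (num_short && long_interval && long_interval > req_interval) {
                    p.short_cycles_msec = req_interval;
                    p.long_cycles_msec = long_interval;
                    p.short_cycles_count = num_short;
            } else {
                    p.short_cycles_msec = 0;
                    p.long_cycles_msec = req_interval;
                    p.short_cycles_count = 0;
            }
            return p;
    }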
 
index 2e636aa5dba9bac8d11ff63d5fe02b7efc19af9c..66a763f644d2f8a043050623601f6c9c3fffce08 100644 (file)
@@ -74,8 +74,8 @@ struct wl18xx_cmd_scan_params {
        u8 dfs;            /* number of dfs channels in 5ghz */
        u8 passive_active; /* number of passive before active channels 2.4ghz */
 
-       __le16 short_cycles_sec;
-       __le16 long_cycles_sec;
+       __le16 short_cycles_msec;
+       __le16 long_cycles_msec;
        u8 short_cycles_count;
        u8 total_cycles; /* 0 - infinite */
        u8 padding[2];
index 68919f8d4310455fad623381ee16e2f1c96dac69..f01d24baff7cf00712cf07667f9b046b3d00dab2 100644 (file)
@@ -2003,12 +2003,15 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                      wlvif->bss_type == BSS_TYPE_IBSS)))
                return -EINVAL;
 
-       ret = wl12xx_cmd_role_enable(wl,
-                                    wl12xx_wlvif_to_vif(wlvif)->addr,
-                                    WL1271_ROLE_DEVICE,
-                                    &wlvif->dev_role_id);
-       if (ret < 0)
-               goto out;
+       /* the dev role is already started for p2p mgmt interfaces */
+       if (!wlcore_is_p2p_mgmt(wlvif)) {
+               ret = wl12xx_cmd_role_enable(wl,
+                                            wl12xx_wlvif_to_vif(wlvif)->addr,
+                                            WL1271_ROLE_DEVICE,
+                                            &wlvif->dev_role_id);
+               if (ret < 0)
+                       goto out;
+       }
 
        ret = wl12xx_cmd_role_start_dev(wl, wlvif, band, channel);
        if (ret < 0)
@@ -2023,7 +2026,8 @@ int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 out_stop:
        wl12xx_cmd_role_stop_dev(wl, wlvif);
 out_disable:
-       wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
+       if (!wlcore_is_p2p_mgmt(wlvif))
+               wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
 out:
        return ret;
 }
@@ -2052,10 +2056,42 @@ int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        if (ret < 0)
                goto out;
 
-       ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
-       if (ret < 0)
-               goto out;
+       if (!wlcore_is_p2p_mgmt(wlvif)) {
+               ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
+               if (ret < 0)
+                       goto out;
+       }
 
 out:
        return ret;
 }
+
+int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                          u8 feature, u8 enable, u8 value)
+{
+       struct wlcore_cmd_generic_cfg *cmd;
+       int ret;
+
+       wl1271_debug(DEBUG_CMD,
+                    "cmd generic cfg (role %d feature %d enable %d value %d)",
+                    wlvif->role_id, feature, enable, value);
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd)
+               return -ENOMEM;
+
+       cmd->role_id = wlvif->role_id;
+       cmd->feature = feature;
+       cmd->enable = enable;
+       cmd->value = value;
+
+       ret = wl1271_cmd_send(wl, CMD_GENERIC_CFG, cmd, sizeof(*cmd), 0);
+       if (ret < 0) {
+               wl1271_error("failed to send generic cfg command");
+               goto out_free;
+       }
+out_free:
+       kfree(cmd);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(wlcore_cmd_generic_cfg);
index e14cd407a6aea6295f786033a63a234c12bc6f6c..8dc46c0a489a1b6ea8d3e6ef752d13906e670feb 100644 (file)
@@ -92,6 +92,8 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
                                     enum ieee80211_band band);
 int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl);
+int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                          u8 feature, u8 enable, u8 value);
 int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
 int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
 int wl12xx_cmd_stop_fwlog(struct wl1271 *wl);
@@ -652,6 +654,19 @@ struct wl12xx_cmd_regdomain_dfs_config {
        u8 padding[3];
 } __packed;
 
+enum wlcore_generic_cfg_feature {
+       WLCORE_CFG_FEATURE_RADAR_DEBUG = 2,
+};
+
+struct wlcore_cmd_generic_cfg {
+       struct wl1271_cmd_header header;
+
+       u8 role_id;
+       u8 feature;
+       u8 enable;
+       u8 value;
+} __packed;
+
 struct wl12xx_cmd_config_fwlog {
        struct wl1271_cmd_header header;
 
index 166add00b50fb43984b458f4ab9cf254b9a456d5..52a9d1b140203612ba05285c1542f3ce7c28182d 100644 (file)
@@ -1186,6 +1186,15 @@ struct conf_sched_scan_settings {
 
        /* SNR threshold to be used for filtering */
        s8 snr_threshold;
+
+       /*
+        * number of short-interval scheduled scan cycles to run
+        * before switching to long intervals
+        */
+       u8 num_short_intervals;
+
+       /* interval between each long scheduled scan cycle (in ms) */
+       u16 long_interval;
 } __packed;
 
 struct conf_ht_setting {
@@ -1352,7 +1361,7 @@ struct conf_recovery_settings {
  * version, the two LSB are the lower driver's private conf
  * version.
  */
-#define WLCORE_CONF_VERSION    (0x0006 << 16)
+#define WLCORE_CONF_VERSION    (0x0007 << 16)
 #define WLCORE_CONF_MASK       0xffff0000
 #define WLCORE_CONF_SIZE       (sizeof(struct wlcore_conf_header) +    \
                                 sizeof(struct wlcore_conf))
index 5ca1fb161a50c8bbc612d859891fb9a3c8a441cf..e92f2639af2c8835d5c2faddd2c5630e99f89067 100644 (file)
@@ -348,7 +348,7 @@ static int wl12xx_init_fwlog(struct wl1271 *wl)
 }
 
 /* generic sta initialization (non vif-specific) */
-static int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
        int ret;
 
index a45fbfddec192e67791be7cb23ac419c2b3a82ba..fd1cdb6bc3e4d1e6ee972a09728e7dde0a8217d3 100644 (file)
@@ -35,5 +35,6 @@ int wl1271_hw_init(struct wl1271 *wl);
 int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif);
 int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif);
+int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 
 #endif
index 337223b9f6f89aab54ae4c3cd93421e1ebb404fe..e819369d8f8f43a2ce37a5260c3c8c017c7ad5eb 100644 (file)
@@ -1792,6 +1792,9 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
 
        wl->wow_enabled = true;
        wl12xx_for_each_wlvif(wl, wlvif) {
+               if (wlcore_is_p2p_mgmt(wlvif))
+                       continue;
+
                ret = wl1271_configure_suspend(wl, wlvif, wow);
                if (ret < 0) {
                        mutex_unlock(&wl->mutex);
@@ -1901,6 +1904,9 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
                goto out;
 
        wl12xx_for_each_wlvif(wl, wlvif) {
+               if (wlcore_is_p2p_mgmt(wlvif))
+                       continue;
+
                wl1271_configure_resume(wl, wlvif);
        }
 
@@ -2256,6 +2262,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
                wlvif->p2p = 1;
                /* fall-through */
        case NL80211_IFTYPE_STATION:
+       case NL80211_IFTYPE_P2P_DEVICE:
                wlvif->bss_type = BSS_TYPE_STA_BSS;
                break;
        case NL80211_IFTYPE_ADHOC:
@@ -2477,7 +2484,8 @@ static void wlcore_hw_queue_iter(void *data, u8 *mac,
 {
        struct wlcore_hw_queue_iter_data *iter_data = data;
 
-       if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
+       if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+           WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
                return;
 
        if (iter_data->cur_running || vif == iter_data->vif) {
@@ -2495,6 +2503,11 @@ static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
        struct wlcore_hw_queue_iter_data iter_data = {};
        int i, q_base;
 
+       if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+               vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+               return 0;
+       }
+
        iter_data.vif = vif;
 
        /* mark all bits taken by active interfaces */
@@ -2618,14 +2631,27 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
                        goto out;
        }
 
-       ret = wl12xx_cmd_role_enable(wl, vif->addr,
-                                    role_type, &wlvif->role_id);
-       if (ret < 0)
-               goto out;
+       if (!wlcore_is_p2p_mgmt(wlvif)) {
+               ret = wl12xx_cmd_role_enable(wl, vif->addr,
+                                            role_type, &wlvif->role_id);
+               if (ret < 0)
+                       goto out;
 
-       ret = wl1271_init_vif_specific(wl, vif);
-       if (ret < 0)
-               goto out;
+               ret = wl1271_init_vif_specific(wl, vif);
+               if (ret < 0)
+                       goto out;
+
+       } else {
+               ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
+                                            &wlvif->dev_role_id);
+               if (ret < 0)
+                       goto out;
+
+               /* needed mainly for configuring rate policies */
+               ret = wl1271_sta_hw_init(wl, wlvif);
+               if (ret < 0)
+                       goto out;
+       }
 
        list_add(&wlvif->list, &wl->wlvif_list);
        set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
@@ -2696,9 +2722,15 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
                                wl12xx_stop_dev(wl, wlvif);
                }
 
-               ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
-               if (ret < 0)
-                       goto deinit;
+               if (!wlcore_is_p2p_mgmt(wlvif)) {
+                       ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
+                       if (ret < 0)
+                               goto deinit;
+               } else {
+                       ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
+                       if (ret < 0)
+                               goto deinit;
+               }
 
                wl1271_ps_elp_sleep(wl);
        }
@@ -3088,6 +3120,9 @@ static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 {
        int ret;
 
+       if (wlcore_is_p2p_mgmt(wlvif))
+               return 0;
+
        if (conf->power_level != wlvif->power_level) {
                ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
                if (ret < 0)
@@ -3207,6 +3242,9 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
                goto out;
 
        wl12xx_for_each_wlvif(wl, wlvif) {
+               if (wlcore_is_p2p_mgmt(wlvif))
+                       continue;
+
                if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
                        if (*total & FIF_ALLMULTI)
                                ret = wl1271_acx_group_address_tbl(wl, wlvif,
@@ -4837,6 +4875,9 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
        u8 ps_scheme;
        int ret = 0;
 
+       if (wlcore_is_p2p_mgmt(wlvif))
+               return 0;
+
        mutex_lock(&wl->mutex);
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
@@ -6078,8 +6119,10 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
        wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
 
        wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
-               BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
-               BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
+                                        BIT(NL80211_IFTYPE_AP) |
+                                        BIT(NL80211_IFTYPE_P2P_DEVICE) |
+                                        BIT(NL80211_IFTYPE_P2P_CLIENT) |
+                                        BIT(NL80211_IFTYPE_P2P_GO);
        wl->hw->wiphy->max_scan_ssids = 1;
        wl->hw->wiphy->max_sched_scan_ssids = 16;
        wl->hw->wiphy->max_match_sets = 16;
index e125974285cc890e0b40671886815c996ef37a11..5b2927391d1cef5aaa0c6c4f3947e7f51aa1a1eb 100644 (file)
@@ -74,7 +74,14 @@ static void wl1271_rx_status(struct wl1271 *wl,
        if (desc->rate <= wl->hw_min_ht_rate)
                status->flag |= RX_FLAG_HT;
 
-       status->signal = desc->rssi;
+       /*
+        * Read the signal level and antenna diversity indication.
+        * The MSB of the signal level is always set, since the level
+        * is a negative number.
+        * The antenna indication is the MSB of the rssi.
+        */
+       status->signal = ((desc->rssi & RSSI_LEVEL_BITMASK) | BIT(7));
+       status->antenna = ((desc->rssi & ANT_DIVERSITY_BITMASK) >> 7);
 
        /*
         * FIXME: In wl1251, the SNR should be divided by two.  In wl1271 we
index a3b1618db27c202db4377c8e529aee4751c8c32a..f5a7087cfb97831fea0296186d59b3acbc0d7ac5 100644 (file)
@@ -30,6 +30,9 @@
 #define WL1271_RX_MAX_RSSI -30
 #define WL1271_RX_MIN_RSSI -95
 
+#define RSSI_LEVEL_BITMASK     0x7F
+#define ANT_DIVERSITY_BITMASK  BIT(7)
+
 #define SHORT_PREAMBLE_BIT   BIT(0)
 #define OFDM_RATE_BIT        BIT(6)
 #define PBCC_RATE_BIT        BIT(7)
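The bit split defined above can be checked with a concrete value: a raw rssi byte of 0xB5 decodes to antenna 1 and signal (0x35 | 0x80) = 0xB5, which reads as -75 dBm when interpreted as a signed 8-bit value. A standalone sketch:

    #include <stdint.h>

    #define RSSI_LEVEL_BITMASK    0x7F
    #define ANT_DIVERSITY_BITMASK 0x80  /* BIT(7) */

    static void decode_rssi(uint8_t raw, int8_t *signal, uint8_t *antenna)
    {
            /* bit 7 is re-set because the 7-bit level is always negative */
            *signal  = (int8_t)((raw & RSSI_LEVEL_BITMASK) | 0x80);
            *antenna = (raw & ANT_DIVERSITY_BITMASK) >> 7;
    }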
index 4dadd0c62cde5251f83d924e20b1c5312e3d2806..782eb297c196955c9d0b90002d8e8e05588ac511 100644 (file)
@@ -83,6 +83,12 @@ struct wl1271_cmd_trigger_scan_to {
 #define MAX_CHANNELS_5GHZ      42
 
 #define SCAN_MAX_CYCLE_INTERVALS 16
+
+/* The FW interval table can take up to 16 entries.
+ * The 1st entry isn't used (the first scan is immediate), and the
+ * last entry is reserved for the long_interval.
+ */
+#define SCAN_MAX_SHORT_INTERVALS (SCAN_MAX_CYCLE_INTERVALS - 2)
 #define SCAN_MAX_BANDS 3
 
 enum {
index ea7e07abca4ebaccd37eaa1aac75567453b3e0a0..c172da56b550b8a59f88f5206a0160c1a878f5bb 100644 (file)
@@ -293,7 +293,8 @@ static int wl1271_probe(struct sdio_func *func,
        /* Use block mode for transferring over one block size of data */
        func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
 
-       if (wlcore_probe_of(&func->dev, &irq, &pdev_data))
+       ret = wlcore_probe_of(&func->dev, &irq, &pdev_data);
+       if (ret)
                goto out_free_glue;
 
        /* if sdio can keep power while host is suspended, enable wow */
index 7f363fa566a3478b24e354abd3dadacb19275e6a..a1b6040e6491219544be4054343ff00f6c555db1 100644 (file)
@@ -500,6 +500,9 @@ struct wl1271 {
        /* interface combinations supported by the hw */
        const struct ieee80211_iface_combination *iface_combinations;
        u8 n_iface_combinations;
+
+       /* dynamic fw traces */
+       u32 dynamic_fw_traces;
 };
 
 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
index 39efc6d78b10dd743f65c0b96dfe72f86747afe3..27c56876b2c13821606a662a3d06a6ae4e24ccdb 100644 (file)
@@ -503,6 +503,11 @@ struct ieee80211_vif *wl12xx_wlvif_to_vif(struct wl12xx_vif *wlvif)
        return container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
 }
 
+static inline bool wlcore_is_p2p_mgmt(struct wl12xx_vif *wlvif)
+{
+       return wl12xx_wlvif_to_vif(wlvif)->type == NL80211_IFTYPE_P2P_DEVICE;
+}
+
 #define wl12xx_for_each_wlvif(wl, wlvif) \
                list_for_each_entry(wlvif, &wl->wlvif_list, list)
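wlcore_is_p2p_mgmt() leans on wl12xx_wlvif_to_vif(), which recovers the enclosing ieee80211_vif from the driver-private area via container_of(). A minimal re-derivation of that trick, with illustrative struct names:

    #include <stddef.h>

    struct outer {
            int public_state;
            char drv_priv[64];  /* private area, like ieee80211_vif's */
    };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Subtracting the member offset turns a pointer to the private area
     * back into a pointer to the enclosing object. */
    static struct outer *priv_to_outer(void *priv)
    {
            return container_of(priv, struct outer, drv_priv);
    }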
 
index 8a495b318b6f23bf66b19f4d77e557506cab5b0f..c6cb85a85c896fd6dcab466fa80f6dc2b81e8e04 100644 (file)
@@ -325,9 +325,6 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
                queue->pending_prod + queue->pending_cons;
 }
 
-/* Callback from stack when TX packet can be released */
-void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
-
 irqreturn_t xenvif_interrupt(int irq, void *dev_id);
 
 extern bool separate_tx_rx_irq;
index 880d0d63e872e5725d76fe998db0282c749f45d6..7d50711476fe1e88debca95beb790d770261f036 100644 (file)
@@ -1566,13 +1566,13 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
                smp_rmb();
 
                while (dc != dp) {
-                       BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
+                       BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
                        pending_idx =
                                queue->dealloc_ring[pending_index(dc++)];
 
-                       pending_idx_release[gop-queue->tx_unmap_ops] =
+                       pending_idx_release[gop - queue->tx_unmap_ops] =
                                pending_idx;
-                       queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
+                       queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
                                queue->mmap_pages[pending_idx];
                        gnttab_set_unmap_op(gop,
                                            idx_to_kaddr(queue, pending_idx),
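The BUG_ON tightened above guards array indexing: gop walks arrays sized MAX_PENDING_REQS, so the last valid offset is MAX_PENDING_REQS - 1 and the assertion has to trip at >=, not >. A standalone illustration (the array size here is a placeholder, not the driver's constant):

    #include <assert.h>
    #include <stddef.h>

    #define MAX_PENDING_REQS 256  /* illustrative size only */

    static void check_offset(const int *gop, const int *base)
    {
            ptrdiff_t off = gop - base;

            /* off == MAX_PENDING_REQS is already out of bounds */
            assert(off >= 0 && off < MAX_PENDING_REQS);
    }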
index a5233422f9dc5f770b98d0555560fb87a5f83ff0..7384455792bfb629ed6a2b9a5dbe40d1f58f2627 100644 (file)
@@ -458,10 +458,15 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
                nvdimm_bus_unlock(dev);
        }
        if (is_nd_btt(dev) && probe) {
+               struct nd_btt *nd_btt = to_nd_btt(dev);
+
                nd_region = to_nd_region(dev->parent);
                nvdimm_bus_lock(dev);
                if (nd_region->btt_seed == dev)
                        nd_region_create_btt_seed(nd_region);
+               if (nd_region->ns_seed == &nd_btt->ndns->dev &&
+                               is_nd_blk(dev->parent))
+                       nd_region_create_blk_seed(nd_region);
                nvdimm_bus_unlock(dev);
        }
 }
index 8df1b1777745e8e0dfb46611197a8238c929af4e..59bb8556e43ac8f975485465ca8d1b0fdfeba3c5 100644 (file)
@@ -47,7 +47,7 @@ config OF_DYNAMIC
 
 config OF_ADDRESS
        def_bool y
-       depends on !SPARC
+       depends on !SPARC && HAS_IOMEM
        select OF_ADDRESS_PCI if PCI
 
 config OF_ADDRESS_PCI
index fdc60db608291b7e1fd81c6a3d774ba9c9c77467..7c8c23cc6896ca300e31c1d99801e168cb3615de 100644 (file)
@@ -266,7 +266,8 @@ EXPORT_SYMBOL(of_phy_attach);
 bool of_phy_is_fixed_link(struct device_node *np)
 {
        struct device_node *dn;
-       int len;
+       int len, err;
+       const char *managed;
 
        /* New binding */
        dn = of_get_child_by_name(np, "fixed-link");
@@ -275,6 +276,10 @@ bool of_phy_is_fixed_link(struct device_node *np)
                return true;
        }
 
+       err = of_property_read_string(np, "managed", &managed);
+       if (err == 0 && strcmp(managed, "auto") != 0)
+               return true;
+
        /* Old binding */
        if (of_get_property(np, "fixed-link", &len) &&
            len == (5 * sizeof(__be32)))
@@ -289,8 +294,18 @@ int of_phy_register_fixed_link(struct device_node *np)
        struct fixed_phy_status status = {};
        struct device_node *fixed_link_node;
        const __be32 *fixed_link_prop;
-       int len;
+       int len, err;
        struct phy_device *phy;
+       const char *managed;
+
+       err = of_property_read_string(np, "managed", &managed);
+       if (err == 0) {
+               if (strcmp(managed, "in-band-status") == 0) {
+                       /* status is zeroed, namely its .link member */
+                       phy = fixed_phy_register(PHY_POLL, &status, np);
+                       return IS_ERR(phy) ? PTR_ERR(phy) : 0;
+               }
+       }
 
        /* New binding */
        fixed_link_node = of_get_child_by_name(np, "fixed-link");
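The new "managed" handling above reduces to two string checks: any value other than "auto" marks the link as fixed, and "in-band-status" additionally registers a polled fixed PHY whose status starts zeroed (link down until in-band signalling raises it). A condensed sketch with illustrative helper names:

    #include <stdbool.h>
    #include <string.h>

    static bool managed_means_fixed_link(const char *managed)
    {
            return managed && strcmp(managed, "auto") != 0;
    }

    static bool managed_wants_inband_phy(const char *managed)
    {
            return managed && strcmp(managed, "in-band-status") == 0;
    }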
index 18016341d5a91656b29220b414faddae48188648..9f71770b6226f9ed3d4ceab7bb95ea3f4ba55b6c 100644 (file)
@@ -979,7 +979,6 @@ static struct platform_driver unittest_driver = {
        .remove                 = unittest_remove,
        .driver = {
                .name           = "unittest",
-               .owner          = THIS_MODULE,
                .of_match_table = of_match_ptr(unittest_match),
        },
 };
@@ -1666,7 +1665,6 @@ static const struct i2c_device_id unittest_i2c_dev_id[] = {
 static struct i2c_driver unittest_i2c_dev_driver = {
        .driver = {
                .name = "unittest-i2c-dev",
-               .owner = THIS_MODULE,
        },
        .probe = unittest_i2c_dev_probe,
        .remove = unittest_i2c_dev_remove,
@@ -1761,7 +1759,6 @@ static const struct i2c_device_id unittest_i2c_mux_id[] = {
 static struct i2c_driver unittest_i2c_mux_driver = {
        .driver = {
                .name = "unittest-i2c-mux",
-               .owner = THIS_MODULE,
        },
        .probe = unittest_i2c_mux_probe,
        .remove = unittest_i2c_mux_remove,
index 8067f54ce050a6c8cf3a4c18d9b4ea560f017d94..5ce5ef211bdbdf575752e150edc6e26cc683e82d 100644 (file)
@@ -891,8 +891,10 @@ parport_register_dev_model(struct parport *port, const char *name,
        par_dev->dev.release = free_pardevice;
        par_dev->devmodel = true;
        ret = device_register(&par_dev->dev);
-       if (ret)
-               goto err_put_dev;
+       if (ret) {
+               put_device(&par_dev->dev);
+               goto err_put_port;
+       }
 
        /* Chain this onto the list */
        par_dev->prev = NULL;
@@ -907,7 +909,8 @@ parport_register_dev_model(struct parport *port, const char *name,
                        spin_unlock(&port->physport->pardevice_lock);
                        pr_debug("%s: cannot grant exclusive access for device %s\n",
                                 port->name, name);
-                       goto err_put_dev;
+                       device_unregister(&par_dev->dev);
+                       goto err_put_port;
                }
                port->flags |= PARPORT_FLAG_EXCL;
        }
@@ -938,8 +941,6 @@ parport_register_dev_model(struct parport *port, const char *name,
 
        return par_dev;
 
-err_put_dev:
-       put_device(&par_dev->dev);
 err_free_devname:
        kfree(devname);
 err_free_par_dev:
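The reworked error paths above encode a refcounting rule: once device_register() has been called, the object belongs to the refcount and must be dropped with put_device() (or torn down with device_unregister() if registration succeeded), never freed directly. A toy model of why the direct free is wrong:

    #include <stdlib.h>

    struct toy_dev {
            int refcount;
            void (*release)(struct toy_dev *);
    };

    static void toy_release(struct toy_dev *d)
    {
            free(d);        /* only the release callback frees the memory */
    }

    static int toy_register(struct toy_dev *d)
    {
            d->refcount = 1;
            d->release = toy_release;
            return -1;      /* pretend registration failed */
    }

    static void toy_put(struct toy_dev *d)
    {
            if (--d->refcount == 0)
                    d->release(d);
    }

    int main(void)
    {
            struct toy_dev *d = calloc(1, sizeof(*d));

            if (!d)
                    return 1;
            if (toy_register(d) < 0)
                    toy_put(d);  /* correct: drop the reference, don't free(d) */
            return 0;
    }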
index c0e6ede3e27d7cd07d92c247af58367e1b6d85a1..6b8dd162f644214ba24fd666b5aa6c94467d964d 100644 (file)
@@ -56,6 +56,7 @@ config PHY_EXYNOS_MIPI_VIDEO
 
 config PHY_PXA_28NM_HSIC
        tristate "Marvell USB HSIC 28nm PHY Driver"
+       depends on HAS_IOMEM
        select GENERIC_PHY
        help
          Enable this to support Marvell USB HSIC PHY driver for Marvell
@@ -66,6 +67,7 @@ config PHY_PXA_28NM_HSIC
 
 config PHY_PXA_28NM_USB2
        tristate "Marvell USB 2.0 28nm PHY Driver"
+       depends on HAS_IOMEM
        select GENERIC_PHY
        help
          Enable this to support Marvell USB 2.0 PHY driver for Marvell
index c6fc95b530835569b040f193f6cf051f7198f7b9..335e06d66ed9a5100cbdda511e6c206238651814 100644 (file)
 
 static const u32 phy_berlin_pll_dividers[] = {
        /* Berlin 2 */
-       CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
-       /* Berlin 2CD */
        CLK_REF_DIV(0x6) | FEEDBACK_CLK_DIV(0x55),
+       /* Berlin 2CD/Q */
+       CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
 };
 
 struct phy_berlin_usb_priv {
index 53f295c1bab1a72108d84b955714b8aef4d7e951..3510b81db3faabcda59a7148e31a32ce403805a5 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/delay.h>
 #include <linux/phy/omap_control_phy.h>
 #include <linux/of_platform.h>
-#include <linux/spinlock.h>
 
 #define        PLL_STATUS              0x00000004
 #define        PLL_GO                  0x00000008
@@ -83,10 +82,6 @@ struct ti_pipe3 {
        struct clk              *refclk;
        struct clk              *div_clk;
        struct pipe3_dpll_map   *dpll_map;
-       bool                    enabled;
-       spinlock_t              lock;   /* serialize clock enable/disable */
-       /* the below flag is needed specifically for SATA */
-       bool                    refclk_enabled;
 };
 
 static struct pipe3_dpll_map dpll_map_usb[] = {
@@ -137,6 +132,9 @@ static struct pipe3_dpll_params *ti_pipe3_get_dpll_params(struct ti_pipe3 *phy)
        return NULL;
 }
 
+static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy);
+static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy);
+
 static int ti_pipe3_power_off(struct phy *x)
 {
        struct ti_pipe3 *phy = phy_get_drvdata(x);
@@ -217,6 +215,7 @@ static int ti_pipe3_init(struct phy *x)
        u32 val;
        int ret = 0;
 
+       ti_pipe3_enable_clocks(phy);
        /*
         * Set pcie_pcs register to 0x96 for proper functioning of phy
         * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table
@@ -250,33 +249,35 @@ static int ti_pipe3_exit(struct phy *x)
        u32 val;
        unsigned long timeout;
 
-       /* SATA DPLL can't be powered down due to Errata i783 and PCIe
-        * does not have internal DPLL
-        */
-       if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") ||
-           of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie"))
+       /* SATA DPLL can't be powered down due to Errata i783 */
+       if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata"))
                return 0;
 
-       /* Put DPLL in IDLE mode */
-       val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
-       val |= PLL_IDLE;
-       ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
-
-       /* wait for LDO and Oscillator to power down */
-       timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME);
-       do {
-               cpu_relax();
-               val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
-               if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN))
-                       break;
-       } while (!time_after(jiffies, timeout));
+       /* PCIe doesn't have internal DPLL */
+       if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) {
+               /* Put DPLL in IDLE mode */
+               val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
+               val |= PLL_IDLE;
+               ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
 
-       if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) {
-               dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n",
-                       val);
-               return -EBUSY;
+               /* wait for LDO and Oscillator to power down */
+               timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME);
+               do {
+                       cpu_relax();
+                       val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
+                       if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN))
+                               break;
+               } while (!time_after(jiffies, timeout));
+
+               if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) {
+                       dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n",
+                               val);
+                       return -EBUSY;
+               }
        }
 
+       ti_pipe3_disable_clocks(phy);
+
        return 0;
 }
 static struct phy_ops ops = {
@@ -306,7 +307,6 @@ static int ti_pipe3_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        phy->dev                = &pdev->dev;
-       spin_lock_init(&phy->lock);
 
        if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
                match = of_match_device(ti_pipe3_id_table, &pdev->dev);
@@ -402,6 +402,10 @@ static int ti_pipe3_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, phy);
        pm_runtime_enable(phy->dev);
+       /* Prevent auto-disable of refclk for SATA PHY due to Errata i783 */
+       if (of_device_is_compatible(node, "ti,phy-pipe3-sata"))
+               if (!IS_ERR(phy->refclk))
+                       clk_prepare_enable(phy->refclk);
 
        generic_phy = devm_phy_create(phy->dev, NULL, &ops);
        if (IS_ERR(generic_phy))
@@ -413,63 +417,33 @@ static int ti_pipe3_probe(struct platform_device *pdev)
        if (IS_ERR(phy_provider))
                return PTR_ERR(phy_provider);
 
-       pm_runtime_get(&pdev->dev);
-
        return 0;
 }
 
 static int ti_pipe3_remove(struct platform_device *pdev)
 {
-       if (!pm_runtime_suspended(&pdev->dev))
-               pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int ti_pipe3_enable_refclk(struct ti_pipe3 *phy)
+static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
 {
-       if (!IS_ERR(phy->refclk) && !phy->refclk_enabled) {
-               int ret;
+       int ret = 0;
 
+       if (!IS_ERR(phy->refclk)) {
                ret = clk_prepare_enable(phy->refclk);
                if (ret) {
                        dev_err(phy->dev, "Failed to enable refclk %d\n", ret);
                        return ret;
                }
-               phy->refclk_enabled = true;
        }
 
-       return 0;
-}
-
-static void ti_pipe3_disable_refclk(struct ti_pipe3 *phy)
-{
-       if (!IS_ERR(phy->refclk))
-               clk_disable_unprepare(phy->refclk);
-
-       phy->refclk_enabled = false;
-}
-
-static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
-{
-       int ret = 0;
-       unsigned long flags;
-
-       spin_lock_irqsave(&phy->lock, flags);
-       if (phy->enabled)
-               goto err1;
-
-       ret = ti_pipe3_enable_refclk(phy);
-       if (ret)
-               goto err1;
-
        if (!IS_ERR(phy->wkupclk)) {
                ret = clk_prepare_enable(phy->wkupclk);
                if (ret) {
                        dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret);
-                       goto err2;
+                       goto disable_refclk;
                }
        }
 
@@ -477,96 +451,33 @@ static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
                ret = clk_prepare_enable(phy->div_clk);
                if (ret) {
                        dev_err(phy->dev, "Failed to enable div_clk %d\n", ret);
-                       goto err3;
+                       goto disable_wkupclk;
                }
        }
 
-       phy->enabled = true;
-       spin_unlock_irqrestore(&phy->lock, flags);
        return 0;
 
-err3:
+disable_wkupclk:
        if (!IS_ERR(phy->wkupclk))
                clk_disable_unprepare(phy->wkupclk);
 
-err2:
+disable_refclk:
        if (!IS_ERR(phy->refclk))
                clk_disable_unprepare(phy->refclk);
 
-       ti_pipe3_disable_refclk(phy);
-err1:
-       spin_unlock_irqrestore(&phy->lock, flags);
        return ret;
 }
 
 static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&phy->lock, flags);
-       if (!phy->enabled) {
-               spin_unlock_irqrestore(&phy->lock, flags);
-               return;
-       }
-
        if (!IS_ERR(phy->wkupclk))
                clk_disable_unprepare(phy->wkupclk);
-       /* Don't disable refclk for SATA PHY due to Errata i783 */
-       if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata"))
-               ti_pipe3_disable_refclk(phy);
+       if (!IS_ERR(phy->refclk))
+               clk_disable_unprepare(phy->refclk);
        if (!IS_ERR(phy->div_clk))
                clk_disable_unprepare(phy->div_clk);
-       phy->enabled = false;
-       spin_unlock_irqrestore(&phy->lock, flags);
-}
-
-static int ti_pipe3_runtime_suspend(struct device *dev)
-{
-       struct ti_pipe3 *phy = dev_get_drvdata(dev);
-
-       ti_pipe3_disable_clocks(phy);
-       return 0;
 }
 
-static int ti_pipe3_runtime_resume(struct device *dev)
-{
-       struct ti_pipe3 *phy = dev_get_drvdata(dev);
-       int ret = 0;
-
-       ret = ti_pipe3_enable_clocks(phy);
-       return ret;
-}
-
-static int ti_pipe3_suspend(struct device *dev)
-{
-       struct ti_pipe3 *phy = dev_get_drvdata(dev);
-
-       ti_pipe3_disable_clocks(phy);
-       return 0;
-}
-
-static int ti_pipe3_resume(struct device *dev)
-{
-       struct ti_pipe3 *phy = dev_get_drvdata(dev);
-       int ret;
-
-       ret = ti_pipe3_enable_clocks(phy);
-       if (ret)
-               return ret;
-
-       pm_runtime_disable(dev);
-       pm_runtime_set_active(dev);
-       pm_runtime_enable(dev);
-       return 0;
-}
-#endif
-
-static const struct dev_pm_ops ti_pipe3_pm_ops = {
-       SET_RUNTIME_PM_OPS(ti_pipe3_runtime_suspend,
-                          ti_pipe3_runtime_resume, NULL)
-       SET_SYSTEM_SLEEP_PM_OPS(ti_pipe3_suspend, ti_pipe3_resume)
-};
-
 static const struct of_device_id ti_pipe3_id_table[] = {
        {
                .compatible = "ti,phy-usb3",
@@ -592,7 +503,6 @@ static struct platform_driver ti_pipe3_driver = {
        .remove         = ti_pipe3_remove,
        .driver         = {
                .name   = "ti-pipe3",
-               .pm     = &ti_pipe3_pm_ops,
                .of_match_table = ti_pipe3_id_table,
        },
 };
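
The reworked ti_pipe3_enable_clocks() above drops the spinlock and the enabled/refclk_enabled bookkeeping and relies on the standard unwind-on-error idiom: each goto label undoes exactly the clocks enabled before the failure and is named after what it undoes, instead of the old err1/err2/err3. A minimal sketch of the idiom, assuming a placeholder my_phy with three mandatory clocks (the real driver additionally tolerates missing optional clocks via IS_ERR()):

#include <linux/clk.h>

struct my_phy {
	struct clk *refclk;
	struct clk *wkupclk;
	struct clk *div_clk;
};

static int my_phy_enable_clocks(struct my_phy *phy)
{
	int ret;

	ret = clk_prepare_enable(phy->refclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(phy->wkupclk);
	if (ret)
		goto disable_refclk;		/* undo only what succeeded */

	ret = clk_prepare_enable(phy->div_clk);
	if (ret)
		goto disable_wkupclk;

	return 0;

disable_wkupclk:
	clk_disable_unprepare(phy->wkupclk);
disable_refclk:
	clk_disable_unprepare(phy->refclk);
	return ret;
}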
index efcf2a2b3975c2cc35fda3806223f0578900d4c2..6177315ab74e5eb43cc92fa9d57c892fadce4b40 100644 (file)
@@ -473,6 +473,8 @@ static void bcm2835_gpio_irq_disable(struct irq_data *data)
 
        spin_lock_irqsave(&pc->irq_lock[bank], flags);
        bcm2835_gpio_irq_config(pc, gpio, false);
+       /* Clear events that were latched prior to clearing event sources */
+       bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
        clear_bit(offset, &pc->enabled_irq_map[bank]);
        spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
 }
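
The added GPEDS0 write acks any event latched between the last handler run and the disable; the status register is write-one-to-clear, so without it a stale edge would fire the moment the interrupt is re-enabled. The general disable-then-ack shape, sketched with placeholder helpers (my_gpio_event_config()/my_gpio_event_ack() are illustrative, not bcm2835 functions):

struct my_gpio_chip {
	spinlock_t irq_lock;
	unsigned long *enabled_irq_map;
};

static void my_gpio_irq_disable(struct my_gpio_chip *gc, unsigned int gpio)
{
	unsigned long flags;

	spin_lock_irqsave(&gc->irq_lock, flags);
	my_gpio_event_config(gc, gpio, false);	/* stop generating events */
	my_gpio_event_ack(gc, gpio);		/* W1C: drop anything latched */
	clear_bit(gpio, gc->enabled_irq_map);
	spin_unlock_irqrestore(&gc->irq_lock, flags);
}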
index 5fd4437cee1584a6cc7a5251269d1078a0b62ddf..88a7fac11bd499f72c831b91f0f6c05bd29b19f7 100644 (file)
@@ -403,14 +403,13 @@ static int imx1_pinconf_set(struct pinctrl_dev *pctldev,
                             unsigned num_configs)
 {
        struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
-       const struct imx1_pinctrl_soc_info *info = ipctl->info;
        int i;
 
        for (i = 0; i != num_configs; ++i) {
                imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN);
 
                dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n",
-                       info->pins[pin_id].name);
+                       pin_desc_get(pctldev, pin_id)->name);
        }
 
        return 0;
index 557d0f2a3031b1a0295fe60796ec17ea7817fc52..97681fac082e71f449874a4a5a21a68b07e27fc2 100644 (file)
@@ -787,7 +787,6 @@ static const struct pinmux_ops abx500_pinmux_ops = {
        .set_mux = abx500_pmx_set,
        .gpio_request_enable = abx500_gpio_request_enable,
        .gpio_disable_free = abx500_gpio_disable_free,
-       .strict = true,
 };
 
 static int abx500_get_groups_cnt(struct pinctrl_dev *pctldev)
index ef0b697639a71e397ec545d17f6fe9ae9cec445d..347c763a6a78762302c89559a5ff92b64ec55872 100644 (file)
@@ -823,7 +823,7 @@ static int lpc18xx_pconf_set_i2c0(struct pinctrl_dev *pctldev,
                break;
 
        case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
-               if (param)
+               if (param_val)
                        *reg &= ~(LPC18XX_SCU_I2C0_ZIF << shift);
                else
                        *reg |= (LPC18XX_SCU_I2C0_ZIF << shift);
@@ -876,7 +876,7 @@ static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev,
                break;
 
        case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
-               if (param)
+               if (param_val)
                        *reg &= ~LPC18XX_SCU_PIN_ZIF;
                else
                        *reg |= LPC18XX_SCU_PIN_ZIF;
index b2de09d3b1a0c8e14b03e23fcb0131e6f41022ee..0b8d480171a3dfea7af0aa8b03e346d56b80f6bb 100644 (file)
@@ -1760,7 +1760,8 @@ static int pcs_irq_init_chained_handler(struct pcs_device *pcs,
                int res;
 
                res = request_irq(pcs_soc->irq, pcs_irq_handler,
-                                 IRQF_SHARED | IRQF_NO_SUSPEND,
+                                 IRQF_SHARED | IRQF_NO_SUSPEND |
+                                 IRQF_NO_THREAD,
                                  name, pcs_soc);
                if (res) {
                        pcs_soc->irq = -1;
index 3dd5a3b2ac62344a19713e92d9e7ad8fa5cf2461..c760bf43d116cfaa83370d640deefd38abb9f407 100644 (file)
 #include "../core.h"
 #include "pinctrl-samsung.h"
 
-#define GROUP_SUFFIX           "-grp"
-#define GSUFFIX_LEN            sizeof(GROUP_SUFFIX)
-#define FUNCTION_SUFFIX                "-mux"
-#define FSUFFIX_LEN            sizeof(FUNCTION_SUFFIX)
-
 /* list of all possible config options supported */
 static struct pin_config {
        const char *property;
index c7508d5f688613b26e6eb226628f61ba9cd38f66..0874cfee6889afa149fc18895de55c12ac2e5ff0 100644 (file)
@@ -224,7 +224,7 @@ struct sh_pfc_soc_info {
 
 /* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */
 #define _GP_GPIO(bank, _pin, _name, sfx)                               \
-       [(bank * 32) + _pin] = {                                        \
+       {                                                               \
                .pin = (bank * 32) + _pin,                              \
                .name = __stringify(_name),                             \
                .enum_id = _name##_DATA,                                \
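
Dropping the [(bank * 32) + _pin] = designator turns the pin table from an index-addressed array into a densely packed one: with designated initializers the array is sized by the largest index and every unnamed slot becomes a zero-filled hole, whereas positional initializers emit exactly one entry per macro expansion. A standalone C illustration with made-up pins:

#include <stdio.h>

struct pin { int pin; const char *name; };

/* Designated: length is max index + 1; slots 1..32 are zeroed holes. */
static const struct pin sparse[] = {
	[0]  = { 0,  "A0" },
	[33] = { 33, "B1" },
};

/* Positional: one element per initializer, no holes. */
static const struct pin packed[] = {
	{ 0,  "A0" },
	{ 33, "B1" },
};

int main(void)
{
	printf("sparse: %zu entries, packed: %zu entries\n",
	       sizeof(sparse) / sizeof(sparse[0]),	/* 34 */
	       sizeof(packed) / sizeof(packed[0]));	/* 2 */
	return 0;
}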
index f87a5eaf75dab42129491a07e1ba148baec0ef82..0afaf79a4e5175f99233726d07ea086758e72623 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * Inspired from:
  * - U300 Pinctl drivers
index dc8bf85ecb2aa5ae33f8de6dc6057dfd526fcbbd..27c2cc8d83adcf8d9e60a91460c20b7cfc2e2dcf 100644 (file)
@@ -2,7 +2,7 @@
  * Driver header file for the ST Microelectronics SPEAr pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index a7bdc537efa79d1c744e88f6e44353a0b2f92c43..92611bb757acc2f4bbd75efdd575b6b3eb839e0b 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr1310 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -2730,7 +2730,7 @@ static void __exit spear1310_pinctrl_exit(void)
 }
 module_exit(spear1310_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match);
index f43ec85a03285db1ee473ae7d5140e0d87811642..f842e9dc40d0a85c5c056524edfb3752fe519597 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr1340 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -2046,7 +2046,7 @@ static void __exit spear1340_pinctrl_exit(void)
 }
 module_exit(spear1340_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match);
index da8990a8eeefcb6b0f4666bd60efdb7de3e9f384..d998a2ccff48ebd00b2a63ed938032ecc2b58a5c 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr300 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -703,7 +703,7 @@ static void __exit spear300_pinctrl_exit(void)
 }
 module_exit(spear300_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match);
index 31ede51e819bf0e41d1c4a8ba9946b6f395c7421..609b18aceb161269bf41e925930d096ceaae8afb 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr310 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -426,7 +426,7 @@ static void __exit spear310_pinctrl_exit(void)
 }
 module_exit(spear310_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, spear310_pinctrl_of_match);
index 506e40b641e048950d0f133dcdc5639112f7b4f2..c07114431bd46c6541409e67c920ff6825a94a07 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr320 pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
@@ -3467,7 +3467,7 @@ static void __exit spear320_pinctrl_exit(void)
 }
 module_exit(spear320_pinctrl_exit);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
 MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match);
index 12ee21af766b1ffafedc935e4bebbdb00065baf6..d3119aafe7090c66798c458534774509e5c4120e 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for the ST Microelectronics SPEAr3xx pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 7860b36053c4523f69a5861f9fd832afae0983e8..ce19dcf8f08b256a70e017afa7871ecac9c20569 100644 (file)
@@ -2,7 +2,7 @@
  * Header file for the ST Microelectronics SPEAr3xx pinmux
  *
  * Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index ed317ccac4a2dc8c490291cb561bb986203dcef2..aaeeae81e3a9798de43aaa80fb4aba2f247ad3ab 100644 (file)
@@ -309,12 +309,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
 static struct calling_interface_buffer *buffer;
 static DEFINE_MUTEX(buffer_mutex);
 
-static int hwswitch_state;
+static void clear_buffer(void)
+{
+       memset(buffer, 0, sizeof(struct calling_interface_buffer));
+}
 
 static void get_buffer(void)
 {
        mutex_lock(&buffer_mutex);
-       memset(buffer, 0, sizeof(struct calling_interface_buffer));
+       clear_buffer();
 }
 
 static void release_buffer(void)
@@ -548,21 +551,41 @@ static int dell_rfkill_set(void *data, bool blocked)
        int disable = blocked ? 1 : 0;
        unsigned long radio = (unsigned long)data;
        int hwswitch_bit = (unsigned long)data - 1;
+       int hwswitch;
+       int status;
+       int ret;
 
        get_buffer();
+
+       dell_send_request(buffer, 17, 11);
+       ret = buffer->output[0];
+       status = buffer->output[1];
+
+       if (ret != 0)
+               goto out;
+
+       clear_buffer();
+
+       buffer->input[0] = 0x2;
        dell_send_request(buffer, 17, 11);
+       ret = buffer->output[0];
+       hwswitch = buffer->output[1];
 
        /* If the hardware switch controls this radio, and the hardware
           switch is disabled, always disable the radio */
-       if ((hwswitch_state & BIT(hwswitch_bit)) &&
-           !(buffer->output[1] & BIT(16)))
+       if (ret == 0 && (hwswitch & BIT(hwswitch_bit)) &&
+           (status & BIT(0)) && !(status & BIT(16)))
                disable = 1;
 
+       clear_buffer();
+
        buffer->input[0] = (1 | (radio<<8) | (disable << 16));
        dell_send_request(buffer, 17, 11);
+       ret = buffer->output[0];
 
+ out:
        release_buffer();
-       return 0;
+       return dell_smi_error(ret);
 }
 
 /* Must be called with the buffer held */
@@ -572,6 +595,7 @@ static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio,
        if (status & BIT(0)) {
                /* Has hw-switch, sync sw_state to BIOS */
                int block = rfkill_blocked(rfkill);
+               clear_buffer();
                buffer->input[0] = (1 | (radio << 8) | (block << 16));
                dell_send_request(buffer, 17, 11);
        } else {
@@ -581,23 +605,43 @@ static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio,
 }
 
 static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio,
-                                       int status)
+                                       int status, int hwswitch)
 {
-       if (hwswitch_state & (BIT(radio - 1)))
+       if (hwswitch & (BIT(radio - 1)))
                rfkill_set_hw_state(rfkill, !(status & BIT(16)));
 }
 
 static void dell_rfkill_query(struct rfkill *rfkill, void *data)
 {
+       int radio = ((unsigned long)data & 0xF);
+       int hwswitch;
        int status;
+       int ret;
 
        get_buffer();
+
        dell_send_request(buffer, 17, 11);
+       ret = buffer->output[0];
        status = buffer->output[1];
 
-       dell_rfkill_update_hw_state(rfkill, (unsigned long)data, status);
+       if (ret != 0 || !(status & BIT(0))) {
+               release_buffer();
+               return;
+       }
+
+       clear_buffer();
+
+       buffer->input[0] = 0x2;
+       dell_send_request(buffer, 17, 11);
+       ret = buffer->output[0];
+       hwswitch = buffer->output[1];
 
        release_buffer();
+
+       if (ret != 0)
+               return;
+
+       dell_rfkill_update_hw_state(rfkill, radio, status, hwswitch);
 }
 
 static const struct rfkill_ops dell_rfkill_ops = {
@@ -609,13 +653,27 @@ static struct dentry *dell_laptop_dir;
 
 static int dell_debugfs_show(struct seq_file *s, void *data)
 {
+       int hwswitch_state;
+       int hwswitch_ret;
        int status;
+       int ret;
 
        get_buffer();
+
        dell_send_request(buffer, 17, 11);
+       ret = buffer->output[0];
        status = buffer->output[1];
+
+       clear_buffer();
+
+       buffer->input[0] = 0x2;
+       dell_send_request(buffer, 17, 11);
+       hwswitch_ret = buffer->output[0];
+       hwswitch_state = buffer->output[1];
+
        release_buffer();
 
+       seq_printf(s, "return:\t%d\n", ret);
        seq_printf(s, "status:\t0x%X\n", status);
        seq_printf(s, "Bit 0 : Hardware switch supported:   %lu\n",
                   status & BIT(0));
@@ -657,7 +715,8 @@ static int dell_debugfs_show(struct seq_file *s, void *data)
        seq_printf(s, "Bit 21: WiGig is blocked:            %lu\n",
                  (status & BIT(21)) >> 21);
 
-       seq_printf(s, "\nhwswitch_state:\t0x%X\n", hwswitch_state);
+       seq_printf(s, "\nhwswitch_return:\t%d\n", hwswitch_ret);
+       seq_printf(s, "hwswitch_state:\t0x%X\n", hwswitch_state);
        seq_printf(s, "Bit 0 : Wifi controlled by switch:      %lu\n",
                   hwswitch_state & BIT(0));
        seq_printf(s, "Bit 1 : Bluetooth controlled by switch: %lu\n",
@@ -693,25 +752,43 @@ static const struct file_operations dell_debugfs_fops = {
 
 static void dell_update_rfkill(struct work_struct *ignored)
 {
+       int hwswitch = 0;
        int status;
+       int ret;
 
        get_buffer();
+
        dell_send_request(buffer, 17, 11);
+       ret = buffer->output[0];
        status = buffer->output[1];
 
+       if (ret != 0)
+               goto out;
+
+       clear_buffer();
+
+       buffer->input[0] = 0x2;
+       dell_send_request(buffer, 17, 11);
+       ret = buffer->output[0];
+
+       if (ret == 0 && (status & BIT(0)))
+               hwswitch = buffer->output[1];
+
        if (wifi_rfkill) {
-               dell_rfkill_update_hw_state(wifi_rfkill, 1, status);
+               dell_rfkill_update_hw_state(wifi_rfkill, 1, status, hwswitch);
                dell_rfkill_update_sw_state(wifi_rfkill, 1, status);
        }
        if (bluetooth_rfkill) {
-               dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status);
+               dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status,
+                                           hwswitch);
                dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status);
        }
        if (wwan_rfkill) {
-               dell_rfkill_update_hw_state(wwan_rfkill, 3, status);
+               dell_rfkill_update_hw_state(wwan_rfkill, 3, status, hwswitch);
                dell_rfkill_update_sw_state(wwan_rfkill, 3, status);
        }
 
+ out:
        release_buffer();
 }
 static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
@@ -773,21 +850,17 @@ static int __init dell_setup_rfkill(void)
 
        get_buffer();
        dell_send_request(buffer, 17, 11);
+       ret = buffer->output[0];
        status = buffer->output[1];
-       buffer->input[0] = 0x2;
-       dell_send_request(buffer, 17, 11);
-       hwswitch_state = buffer->output[1];
        release_buffer();
 
-       if (!(status & BIT(0))) {
-               if (force_rfkill) {
-                       /* No hwsitch, clear all hw-controlled bits */
-                       hwswitch_state &= ~7;
-               } else {
-                       /* rfkill is only tested on laptops with a hwswitch */
-                       return 0;
-               }
-       }
+       /* the Dell wireless info SMBIOS call is not supported */
+       if (ret != 0)
+               return 0;
+
+       /* rfkill is only tested on laptops with a hwswitch */
+       if (!(status & BIT(0)) && !force_rfkill)
+               return 0;
 
        if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
                wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev,
@@ -932,47 +1005,50 @@ static void dell_cleanup_rfkill(void)
 
 static int dell_send_intensity(struct backlight_device *bd)
 {
-       int ret = 0;
+       int token;
+       int ret;
+
+       token = find_token_location(BRIGHTNESS_TOKEN);
+       if (token == -1)
+               return -ENODEV;
 
        get_buffer();
-       buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN);
+       buffer->input[0] = token;
        buffer->input[1] = bd->props.brightness;
 
-       if (buffer->input[0] == -1) {
-               ret = -ENODEV;
-               goto out;
-       }
-
        if (power_supply_is_system_supplied() > 0)
                dell_send_request(buffer, 1, 2);
        else
                dell_send_request(buffer, 1, 1);
 
- out:
+       ret = dell_smi_error(buffer->output[0]);
+
        release_buffer();
        return ret;
 }
 
 static int dell_get_intensity(struct backlight_device *bd)
 {
-       int ret = 0;
+       int token;
+       int ret;
 
-       get_buffer();
-       buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN);
+       token = find_token_location(BRIGHTNESS_TOKEN);
+       if (token == -1)
+               return -ENODEV;
 
-       if (buffer->input[0] == -1) {
-               ret = -ENODEV;
-               goto out;
-       }
+       get_buffer();
+       buffer->input[0] = token;
 
        if (power_supply_is_system_supplied() > 0)
                dell_send_request(buffer, 0, 2);
        else
                dell_send_request(buffer, 0, 1);
 
-       ret = buffer->output[1];
+       if (buffer->output[0])
+               ret = dell_smi_error(buffer->output[0]);
+       else
+               ret = buffer->output[1];
 
- out:
        release_buffer();
        return ret;
 }
@@ -2036,6 +2112,7 @@ static void kbd_led_exit(void)
 static int __init dell_init(void)
 {
        int max_intensity = 0;
+       int token;
        int ret;
 
        if (!dmi_check_system(dell_device_table))
@@ -2094,13 +2171,15 @@ static int __init dell_init(void)
        if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
                return 0;
 
-       get_buffer();
-       buffer->input[0] = find_token_location(BRIGHTNESS_TOKEN);
-       if (buffer->input[0] != -1) {
+       token = find_token_location(BRIGHTNESS_TOKEN);
+       if (token != -1) {
+               get_buffer();
+               buffer->input[0] = token;
                dell_send_request(buffer, 0, 2);
-               max_intensity = buffer->output[3];
+               if (buffer->output[0] == 0)
+                       max_intensity = buffer->output[3];
+               release_buffer();
        }
-       release_buffer();
 
        if (max_intensity) {
                struct backlight_properties props;
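
All of the reworked dell-laptop paths follow the same two-step SMBIOS convention visible above: a class 17 / select 11 request with input[0] left at 0 returns the radio status word in output[1], a second request with input[0] = 0x2 returns the hardware-switch configuration, and output[0] always carries the SMI return code that dell_smi_error() maps to an errno. Condensed into one hypothetical helper (query_radio_state() is not a function in the driver):

static int query_radio_state(int *status, int *hwswitch)
{
	int ret;

	get_buffer();				/* locks and zeroes the buffer */

	dell_send_request(buffer, 17, 11);	/* input[0] == 0: status word */
	ret = buffer->output[0];
	*status = buffer->output[1];
	if (ret != 0)
		goto out;

	clear_buffer();
	buffer->input[0] = 0x2;			/* hwswitch configuration */
	dell_send_request(buffer, 17, 11);
	ret = buffer->output[0];
	*hwswitch = buffer->output[1];

 out:
	release_buffer();
	return dell_smi_error(ret);
}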
index d734763dab6970fac3b9916a4596b1fe2d0fcd58..105cfffe82c617297d0bf348c1c5ae669060b4d2 100644 (file)
@@ -96,18 +96,18 @@ static struct intel_pmc_ipc_dev {
        struct completion cmd_complete;
 
        /* The following PMC BARs share the same ACPI device with the IPC */
-       void *acpi_io_base;
+       resource_size_t acpi_io_base;
        int acpi_io_size;
        struct platform_device *tco_dev;
 
        /* gcr */
-       void *gcr_base;
+       resource_size_t gcr_base;
        int gcr_size;
 
        /* punit */
-       void *punit_base;
+       resource_size_t punit_base;
        int punit_size;
-       void *punit_base2;
+       resource_size_t punit_base2;
        int punit_size2;
        struct platform_device *punit_dev;
 } ipcdev;
@@ -210,10 +210,15 @@ static int intel_pmc_ipc_check_status(void)
        return ret;
 }
 
-/*
- * intel_pmc_ipc_simple_command
- * @cmd: command
- * @sub: sub type
+/**
+ * intel_pmc_ipc_simple_command() - Simple IPC command
+ * @cmd:       IPC command code.
+ * @sub:       IPC command sub type.
+ *
+ * Send a simple IPC command to the PMC when there is no need to specify
+ * input/output data or source/dest pointers.
+ *
+ * Return:     an IPC error code or 0 on success.
  */
 int intel_pmc_ipc_simple_command(int cmd, int sub)
 {
@@ -232,16 +237,20 @@ int intel_pmc_ipc_simple_command(int cmd, int sub)
 }
 EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command);
 
-/*
- * intel_pmc_ipc_raw_cmd
- * @cmd: command
- * @sub: sub type
- * @in: input data
- * @inlen: input length in bytes
- * @out: output data
- * @outlen: output length in dwords
- * @sptr: data writing to SPTR register
- * @dptr: data writing to DPTR register
+/**
+ * intel_pmc_ipc_raw_cmd() - IPC command with data and pointers
+ * @cmd:       IPC command code.
+ * @sub:       IPC command sub type.
+ * @in:                input data of this IPC command.
+ * @inlen:     input data length in bytes.
+ * @out:       output data of this IPC command.
+ * @outlen:    output data length in dwords.
+ * @sptr:      data writing to SPTR register.
+ * @dptr:      data writing to DPTR register.
+ *
+ * Send an IPC command to PMC with input/output data and source/dest pointers.
+ *
+ * Return:     an IPC error code or 0 on success.
  */
 int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
                          u32 outlen, u32 dptr, u32 sptr)
@@ -278,14 +287,18 @@ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out,
 }
 EXPORT_SYMBOL_GPL(intel_pmc_ipc_raw_cmd);
 
-/*
- * intel_pmc_ipc_command
- * @cmd: command
- * @sub: sub type
- * @in: input data
- * @inlen: input length in bytes
- * @out: output data
- * @outlen: output length in dwords
+/**
+ * intel_pmc_ipc_command() -  IPC command with input/output data
+ * @cmd:       IPC command code.
+ * @sub:       IPC command sub type.
+ * @in:                input data of this IPC command.
+ * @inlen:     input data length in bytes.
+ * @out:       output data of this IPC command.
+ * @outlen:    output data length in dwords.
+ *
+ * Send an IPC command to PMC with input/output data.
+ *
+ * Return:     an IPC error code or 0 on success.
  */
 int intel_pmc_ipc_command(u32 cmd, u32 sub, u8 *in, u32 inlen,
                          u32 *out, u32 outlen)
@@ -480,11 +493,11 @@ static int ipc_create_punit_device(void)
        pdev->dev.parent = ipcdev.dev;
 
        res = punit_res;
-       res->start = (resource_size_t)ipcdev.punit_base;
+       res->start = ipcdev.punit_base;
        res->end = res->start + ipcdev.punit_size - 1;
 
        res = punit_res + PUNIT_RESOURCE_INTER;
-       res->start = (resource_size_t)ipcdev.punit_base2;
+       res->start = ipcdev.punit_base2;
        res->end = res->start + ipcdev.punit_size2 - 1;
 
        ret = platform_device_add_resources(pdev, punit_res,
@@ -522,15 +535,15 @@ static int ipc_create_tco_device(void)
        pdev->dev.parent = ipcdev.dev;
 
        res = tco_res + TCO_RESOURCE_ACPI_IO;
-       res->start = (resource_size_t)ipcdev.acpi_io_base + TCO_BASE_OFFSET;
+       res->start = ipcdev.acpi_io_base + TCO_BASE_OFFSET;
        res->end = res->start + TCO_REGS_SIZE - 1;
 
        res = tco_res + TCO_RESOURCE_SMI_EN_IO;
-       res->start = (resource_size_t)ipcdev.acpi_io_base + SMI_EN_OFFSET;
+       res->start = ipcdev.acpi_io_base + SMI_EN_OFFSET;
        res->end = res->start + SMI_EN_SIZE - 1;
 
        res = tco_res + TCO_RESOURCE_GCR_MEM;
-       res->start = (resource_size_t)ipcdev.gcr_base;
+       res->start = ipcdev.gcr_base;
        res->end = res->start + ipcdev.gcr_size - 1;
 
        ret = platform_device_add_resources(pdev, tco_res, ARRAY_SIZE(tco_res));
@@ -589,7 +602,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
                return -ENXIO;
        }
        size = resource_size(res);
-       ipcdev.acpi_io_base = (void *)res->start;
+       ipcdev.acpi_io_base = res->start;
        ipcdev.acpi_io_size = size;
        dev_info(&pdev->dev, "io res: %llx %x\n",
                 (long long)res->start, (int)resource_size(res));
@@ -601,7 +614,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
                return -ENXIO;
        }
        size = resource_size(res);
-       ipcdev.punit_base = (void *)res->start;
+       ipcdev.punit_base = res->start;
        ipcdev.punit_size = size;
        dev_info(&pdev->dev, "punit data res: %llx %x\n",
                 (long long)res->start, (int)resource_size(res));
@@ -613,7 +626,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
                return -ENXIO;
        }
        size = resource_size(res);
-       ipcdev.punit_base2 = (void *)res->start;
+       ipcdev.punit_base2 = res->start;
        ipcdev.punit_size2 = size;
        dev_info(&pdev->dev, "punit interface res: %llx %x\n",
                 (long long)res->start, (int)resource_size(res));
@@ -637,7 +650,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
        }
        ipcdev.ipc_base = addr;
 
-       ipcdev.gcr_base = (void *)(res->start + size);
+       ipcdev.gcr_base = res->start + size;
        ipcdev.gcr_size = PLAT_RESOURCE_GCR_SIZE;
        dev_info(&pdev->dev, "ipc res: %llx %x\n",
                 (long long)res->start, (int)resource_size(res));
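
Given the kerneldoc added above, a caller of the two exported helpers might look like the sketch below; the command and sub-command codes are made up for illustration, and both functions return 0 on success or an IPC error code:

#include <linux/kernel.h>
#include <asm/intel_pmc_ipc.h>

static int pmc_ipc_example(void)
{
	u8  in[4]  = { 0x01, 0x02, 0x03, 0x04 };
	u32 out[1] = { 0 };
	int ret;

	/* Fire-and-forget: no payload, no source/dest pointers. */
	ret = intel_pmc_ipc_simple_command(0xA0 /* cmd */, 0 /* sub */);
	if (ret)
		return ret;

	/* Four bytes in, one dword out. */
	return intel_pmc_ipc_command(0xA1, 0, in, sizeof(in),
				     out, ARRAY_SIZE(out));
}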
index 001b199a8c33d3a90e6693edd6475a63bf6dc6a5..187d1086d15c3ddc4d939715cc65cd33b820409e 100644 (file)
@@ -216,13 +216,13 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
        int nc;
        u32 offset = 0;
        int err;
-       u8 cbuf[IPC_WWBUF_SIZE] = { };
+       u8 cbuf[IPC_WWBUF_SIZE];
        u32 *wbuf = (u32 *)&cbuf;
 
-       mutex_lock(&ipclock);
-
        memset(cbuf, 0, sizeof(cbuf));
 
+       mutex_lock(&ipclock);
+
        if (ipcdev.pdev == NULL) {
                mutex_unlock(&ipclock);
                return -ENODEV;
index 832932bdc977d21e84f2f8f563ce8aa937c64bc1..7fd4f511d78fd6bdc685c857e280f1581d20b035 100644 (file)
@@ -130,7 +130,7 @@ struct pm800_regulators {
                .owner  = THIS_MODULE,                                  \
                .n_voltages = ARRAY_SIZE(ldo_volt_table),               \
                .vsel_reg       = PM800_##vreg##_VOUT,                  \
-               .vsel_mask      = 0x1f,                                 \
+               .vsel_mask      = 0xf,                                  \
                .enable_reg     = PM800_##ereg,                         \
                .enable_mask    = 1 << (ebit),                          \
                .volt_table     = ldo_volt_table,                       \
index c9f72019bd689afbb4e51528932689dc097b191b..78387a6cbae59e40a6fb05fc255647cacfe3209b 100644 (file)
@@ -109,6 +109,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
 static struct regulator *create_regulator(struct regulator_dev *rdev,
                                          struct device *dev,
                                          const char *supply_name);
+static void _regulator_put(struct regulator *regulator);
 
 static const char *rdev_get_name(struct regulator_dev *rdev)
 {
@@ -1105,6 +1106,9 @@ static int set_supply(struct regulator_dev *rdev,
 
        rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));
 
+       if (!try_module_get(supply_rdev->owner))
+               return -ENODEV;
+
        rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
        if (rdev->supply == NULL) {
                err = -ENOMEM;
@@ -1381,9 +1385,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
        }
 
        if (!r) {
-               dev_err(dev, "Failed to resolve %s-supply for %s\n",
-                       rdev->supply_name, rdev->desc->name);
-               return -EPROBE_DEFER;
+               if (have_full_constraints()) {
+                       r = dummy_regulator_rdev;
+               } else {
+                       dev_err(dev, "Failed to resolve %s-supply for %s\n",
+                               rdev->supply_name, rdev->desc->name);
+                       return -EPROBE_DEFER;
+               }
        }
 
        /* Recursively resolve the supply of the supply */
@@ -1398,8 +1406,11 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
        /* Cascade always-on state to supply */
        if (_regulator_is_enabled(rdev)) {
                ret = regulator_enable(rdev->supply);
-               if (ret < 0)
+               if (ret < 0) {
+                       if (rdev->supply)
+                               _regulator_put(rdev->supply);
                        return ret;
+               }
        }
 
        return 0;
index 6f2bdad8b4d8fd6ce2e552d4c44c8cf4a3ee60ba..e94ddcf97722331e3cdde0645697169a35ba8c12 100644 (file)
@@ -450,7 +450,7 @@ static struct max8973_regulator_platform_data *max8973_parse_dt(
                pdata->control_flags  |= MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE;
 
        if (of_property_read_bool(np, "maxim,enable-bias-control"))
-               pdata->control_flags  |= MAX8973_BIAS_ENABLE;
+               pdata->control_flags  |= MAX8973_CONTROL_BIAS_ENABLE;
 
        return pdata;
 }
index 326ffb55337117d6dc2a96d7fce9cea773e18a14..72fc3c32db49828ce6a2a9256ecabd01bdb034e8 100644 (file)
@@ -34,6 +34,8 @@
 #include <linux/mfd/samsung/s2mps14.h>
 #include <linux/mfd/samsung/s2mpu02.h>
 
+/* The highest number of possible regulators for supported devices. */
+#define S2MPS_REGULATOR_MAX            S2MPS13_REGULATOR_MAX
 struct s2mps11_info {
        unsigned int rdev_num;
        int ramp_delay2;
@@ -49,7 +51,7 @@ struct s2mps11_info {
         * One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether
         * the suspend mode was enabled.
         */
-       unsigned long long s2mps14_suspend_state:50;
+       DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX);
 
        /* Array of size rdev_num with GPIO-s for external sleep control */
        int *ext_control_gpio;
@@ -500,7 +502,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
        switch (s2mps11->dev_type) {
        case S2MPS13X:
        case S2MPS14X:
-               if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
+               if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
                        val = S2MPS14_ENABLE_SUSPEND;
                else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)]))
                        val = S2MPS14_ENABLE_EXT_CONTROL;
@@ -508,7 +510,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
                        val = rdev->desc->enable_mask;
                break;
        case S2MPU02:
-               if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
+               if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
                        val = S2MPU02_ENABLE_SUSPEND;
                else
                        val = rdev->desc->enable_mask;
@@ -562,7 +564,7 @@ static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev)
        if (ret < 0)
                return ret;
 
-       s2mps11->s2mps14_suspend_state |= (1 << rdev_get_id(rdev));
+       set_bit(rdev_get_id(rdev), s2mps11->suspend_state);
        /*
         * Don't enable suspend mode if regulator is already disabled because
         * this would effectively for a short time turn on the regulator after
@@ -960,18 +962,22 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
        case S2MPS11X:
                s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators);
                regulators = s2mps11_regulators;
+               BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
                break;
        case S2MPS13X:
                s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators);
                regulators = s2mps13_regulators;
+               BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
                break;
        case S2MPS14X:
                s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators);
                regulators = s2mps14_regulators;
+               BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
                break;
        case S2MPU02:
                s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators);
                regulators = s2mpu02_regulators;
+               BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
                break;
        default:
                dev_err(&pdev->dev, "Invalid device type: %u\n",
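
The DECLARE_BITMAP() conversion above replaces a 50-bit-wide bitfield, which could never grow past 64 bits, with an arbitrarily sized bitmap manipulated through the atomic set_bit()/test_bit() helpers. The same pattern in isolation, with a placeholder maximum:

#include <linux/bitops.h>
#include <linux/types.h>

#define MY_REGULATOR_MAX	50	/* stands in for S2MPS_REGULATOR_MAX */

struct my_info {
	DECLARE_BITMAP(suspend_state, MY_REGULATOR_MAX);
};

static void mark_suspended(struct my_info *info, int id)
{
	set_bit(id, info->suspend_state);	/* was: state |= 1ULL << id */
}

static bool is_suspended(struct my_info *info, int id)
{
	return test_bit(id, info->suspend_state);
}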
index 4b62d1a875e43eb09bf4631f7695f31dda9bfa6d..2b08cac62f07a5c6fed07b0598025234befe2c18 100644 (file)
@@ -88,7 +88,7 @@ static int armada38x_rtc_set_time(struct device *dev, struct rtc_time *tm)
 {
        struct armada38x_rtc *rtc = dev_get_drvdata(dev);
        int ret = 0;
-       unsigned long time, flags;
+       unsigned long time;
 
        ret = rtc_tm_to_time(tm, &time);
 
index c0090b698ff363732b2e1b0a727de001bc6c26a6..eab230be5a54fdfcfd9c41f39e0d9f1cb515a9e4 100644 (file)
@@ -343,6 +343,8 @@ static int mtk_rtc_probe(struct platform_device *pdev)
                goto out_dispose_irq;
        }
 
+       device_init_wakeup(&pdev->dev, 1);
+
        rtc->rtc_dev = rtc_device_register("mt6397-rtc", &pdev->dev,
                                           &mtk_rtc_ops, THIS_MODULE);
        if (IS_ERR(rtc->rtc_dev)) {
@@ -351,8 +353,6 @@ static int mtk_rtc_probe(struct platform_device *pdev)
                goto out_free_irq;
        }
 
-       device_init_wakeup(&pdev->dev, 1);
-
        return 0;
 
 out_free_irq:
index 95bccfd3f169a8a3a167368f972cbbd08de7c869..e5225ad9c5b12fdd9d723704d66eb9ec2ff5b734 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for the S/390 specific device drivers
 #
 
-obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/
+obj-y += cio/ block/ char/ crypto/ net/ scsi/ virtio/
 
 drivers-y += drivers/s390/built-in.o
 
index 1aec8ff0b58743214ff2ad520dea009d0a1f3f96..f73d2f579a7ef26f053eb5d314dd71e54ed60b82 100644 (file)
@@ -1862,6 +1862,33 @@ static void __dasd_device_check_expire(struct dasd_device *device)
        }
 }
 
+/*
+ * Return 1 when the device is not eligible for I/O
+ */
+static int __dasd_device_is_unusable(struct dasd_device *device,
+                                    struct dasd_ccw_req *cqr)
+{
+       int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM);
+
+       if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
+               /* dasd is being set offline. */
+               return 1;
+       }
+       if (device->stopped) {
+               if (device->stopped & mask) {
+                       /* stopped and CQR will not change that. */
+                       return 1;
+               }
+               if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
+                       /* CQR is not able to change device to
+                        * operational. */
+                       return 1;
+               }
+               /* CQR required to get device operational. */
+       }
+       return 0;
+}
+
 /*
  * Take a look at the first request on the ccw queue and check
  * if it needs to be started.
@@ -1876,13 +1903,8 @@ static void __dasd_device_start_head(struct dasd_device *device)
        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
        if (cqr->status != DASD_CQR_QUEUED)
                return;
-       /* when device is stopped, return request to previous layer
-        * exception: only the disconnect or unresumed bits are set and the
-        * cqr is a path verification request
-        */
-       if (device->stopped &&
-           !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
-             && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) {
+       /* if device is not usable return request to upper layer */
+       if (__dasd_device_is_unusable(device, cqr)) {
                cqr->intrc = -EAGAIN;
                cqr->status = DASD_CQR_CLEARED;
                dasd_schedule_device_bh(device);
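
The extracted __dasd_device_is_unusable() is the old compound condition after a De Morgan expansion, plus one genuinely new check for DASD_FLAG_OFFLINE:

/*
 * Old:  stopped && !(!(stopped & ~(DC_WAIT | UNRESUMED)) && verify_path)
 * De Morgan:
 *       stopped && ((stopped & ~(DC_WAIT | UNRESUMED)) || !verify_path)
 * i.e. the device is unusable when it is stopped by anything other than
 * DC_WAIT/UNRESUMED, or stopped and the CQR is not a path verification.
 * The explicit DASD_FLAG_OFFLINE test is new behavior in this patch.
 */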
index a2597e683e790237d45db2dfd4b4a5ad655d85e9..ee3a6faae22a0b07a8eb8c40684e9d36c825bcfe 100644 (file)
@@ -699,7 +699,8 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
                                               struct dasd_device, alias_list);
        spin_unlock_irqrestore(&lcu->lock, flags);
        alias_priv = (struct dasd_eckd_private *) alias_device->private;
-       if ((alias_priv->count < private->count) && !alias_device->stopped)
+       if ((alias_priv->count < private->count) && !alias_device->stopped &&
+           !test_bit(DASD_FLAG_OFFLINE, &alias_device->flags))
                return alias_device;
        else
                return NULL;
index aeed7969fd792ba35f89cbf3b2f26e357b20de16..7bc6df3100efa8e507641172491b9d0039b9d6dd 100644 (file)
@@ -7,6 +7,7 @@
 #define KMSG_COMPONENT "sclp_early"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/errno.h>
 #include <asm/ctl_reg.h>
 #include <asm/sclp.h>
 #include <asm/ipl.h>
index 08f1830cbfc4020e5901b9ae1f12fae518f29ac5..01bf1f5cf2e95a7f40f722f51e4df87ea257e724 100644 (file)
@@ -54,6 +54,10 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
                   "Copyright IBM Corp. 2001, 2012");
 MODULE_LICENSE("GPL");
 
+static int zcrypt_hwrng_seed = 1;
+module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP);
+MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");
+
 static DEFINE_SPINLOCK(zcrypt_device_lock);
 static LIST_HEAD(zcrypt_device_list);
 static int zcrypt_device_count = 0;
@@ -1373,6 +1377,7 @@ static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
 static struct hwrng zcrypt_rng_dev = {
        .name           = "zcrypt",
        .data_read      = zcrypt_rng_data_read,
+       .quality        = 990,
 };
 
 static int zcrypt_rng_device_add(void)
@@ -1387,6 +1392,8 @@ static int zcrypt_rng_device_add(void)
                        goto out;
                }
                zcrypt_rng_buffer_index = 0;
+               if (!zcrypt_hwrng_seed)
+                       zcrypt_rng_dev.quality = 0;
                rc = hwrng_register(&zcrypt_rng_dev);
                if (rc)
                        goto out_free;
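
A quality of 990 tells the hwrng core that roughly 990 of every 1024 input bits are true entropy, so the kernel will credit and auto-seed from this source; the new hwrng_seed parameter opts out by zeroing the quality before hwrng_register(). The pattern in isolation (my_rng and the stub read are placeholders):

#include <linux/hw_random.h>
#include <linux/module.h>

static int seed_enabled = 1;		/* mirrors the hwrng_seed parameter */
module_param(seed_enabled, int, 0444);

static int my_rng_data_read(struct hwrng *rng, u32 *data)
{
	*data = 0x12345678;	/* placeholder; real drivers read hardware */
	return sizeof(*data);
}

static struct hwrng my_rng = {
	.name		= "my-rng",
	.data_read	= my_rng_data_read,
	.quality	= 990,	/* entropy per 1024 bits of input */
};

static int my_rng_add(void)
{
	if (!seed_enabled)
		my_rng.quality = 0;	/* register, but never credit entropy */
	return hwrng_register(&my_rng);
}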
index 82b92c414a9cfe3152933e9cfc191c79c480568c..437254e1c4dee0c9972ff723ad31575911002993 100644 (file)
@@ -738,7 +738,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
                ql_log(ql_log_info, vha, 0x706f,
                    "Issuing MPI reset.\n");
 
-               if (IS_QLA83XX(ha)) {
+               if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
                        uint32_t idc_control;
 
                        qla83xx_idc_lock(vha, 0);
index 0e6ee3ca30e667591db375ba73d9009b286ec93b..8b011aef12bd5ec6e72f5d7a57bfa2285ae64404 100644 (file)
  * |                              |                    | 0xd031-0xd0ff |
  * |                              |                    | 0xd101-0xd1fe |
  * |                              |                    | 0xd214-0xd2fe |
- * | Target Mode                 |       0xe079       |                |
- * | Target Mode Management      |       0xf072       | 0xf002         |
+ * | Target Mode                 |       0xe080       |                |
+ * | Target Mode Management      |       0xf096       | 0xf002         |
  * |                              |                    | 0xf046-0xf049  |
- * | Target Mode Task Management  |      0x1000b      |                |
+ * | Target Mode Task Management  |      0x1000d      |                |
  * ----------------------------------------------------------------------
  */
 
index e86201d3b8c6d9eccbd1132e96cffb08310b7988..9ad819edcd67af2c73ca5a0a85a8863398b3c6f7 100644 (file)
 #define RESPONSE_ENTRY_CNT_FX00                256     /* Number of response entries.*/
 
 struct req_que;
+struct qla_tgt_sess;
 
 /*
  * (sd.h is not exported, hence local inclusion)
@@ -2026,6 +2027,7 @@ typedef struct fc_port {
        uint16_t port_id;
 
        unsigned long retry_delay_timestamp;
+       struct qla_tgt_sess *tgt_session;
 } fc_port_t;
 
 #include "qla_mr.h"
@@ -3154,13 +3156,13 @@ struct qla_hw_data {
 /* Bit 21 of fw_attributes decides the MCTP capabilities */
 #define IS_MCTP_CAPABLE(ha)    (IS_QLA2031(ha) && \
                                ((ha)->fw_attributes_ext[0] & BIT_0))
-#define IS_PI_UNINIT_CAPABLE(ha)       (IS_QLA83XX(ha))
-#define IS_PI_IPGUARD_CAPABLE(ha)      (IS_QLA83XX(ha))
+#define IS_PI_UNINIT_CAPABLE(ha)       (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_PI_IPGUARD_CAPABLE(ha)      (IS_QLA83XX(ha) || IS_QLA27XX(ha))
 #define IS_PI_DIFB_DIX0_CAPABLE(ha)    (0)
-#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha)        (IS_QLA83XX(ha))
+#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha)        (IS_QLA83XX(ha) || IS_QLA27XX(ha))
 #define IS_PI_SPLIT_DET_CAPABLE(ha)    (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
     (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
-#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha))
+#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
 #define IS_TGT_MODE_CAPABLE(ha)        (ha->tgt.atio_q_length)
 #define IS_SHADOW_REG_CAPABLE(ha)  (IS_QLA27XX(ha))
 #define IS_DPORT_CAPABLE(ha)  (IS_QLA83XX(ha) || IS_QLA27XX(ha))
@@ -3579,6 +3581,16 @@ typedef struct scsi_qla_host {
        uint16_t        fcoe_fcf_idx;
        uint8_t         fcoe_vn_port_mac[6];
 
+       /* list of commands waiting on workqueue */
+       struct list_head        qla_cmd_list;
+       struct list_head        qla_sess_op_cmd_list;
+       spinlock_t              cmd_list_lock;
+
+       /* Counter to detect races between ELS and RSCN events */
+       atomic_t                generation_tick;
+       /* Time when global fcport update has been scheduled */
+       int                     total_fcport_update_gen;
+
        uint32_t        vp_abort_cnt;
 
        struct fc_vport *fc_vport;      /* holds fc_vport * for each vport */
index 664013115c9da7d7912d0b22fa34ddf91e7b977b..11f2f3279eab2984fe131510cc669b329f149bae 100644 (file)
@@ -115,6 +115,8 @@ qla2x00_async_iocb_timeout(void *data)
                        QLA_LOGIO_LOGIN_RETRIED : 0;
                qla2x00_post_async_login_done_work(fcport->vha, fcport,
                        lio->u.logio.data);
+       } else if (sp->type == SRB_LOGOUT_CMD) {
+               qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
        }
 }
 
@@ -497,7 +499,10 @@ void
 qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
     uint16_t *data)
 {
-       qla2x00_mark_device_lost(vha, fcport, 1, 0);
+       /* Don't re-login in target mode */
+       if (!fcport->tgt_session)
+               qla2x00_mark_device_lost(vha, fcport, 1, 0);
+       qlt_logo_completion_handler(fcport, data[0]);
        return;
 }
 
@@ -1538,7 +1543,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
                mem_size = (ha->fw_memory_size - 0x11000 + 1) *
                    sizeof(uint16_t);
        } else if (IS_FWI2_CAPABLE(ha)) {
-               if (IS_QLA83XX(ha))
+               if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
                        fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
                else if (IS_QLA81XX(ha))
                        fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
@@ -1550,7 +1555,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
                mem_size = (ha->fw_memory_size - 0x100000 + 1) *
                    sizeof(uint32_t);
                if (ha->mqenable) {
-                       if (!IS_QLA83XX(ha))
+                       if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
                                mq_size = sizeof(struct qla2xxx_mq_chain);
                        /*
                         * Allocate maximum buffer size for all queues.
@@ -2922,21 +2927,14 @@ qla2x00_rport_del(void *data)
 {
        fc_port_t *fcport = data;
        struct fc_rport *rport;
-       scsi_qla_host_t *vha = fcport->vha;
        unsigned long flags;
 
        spin_lock_irqsave(fcport->vha->host->host_lock, flags);
        rport = fcport->drport ? fcport->drport: fcport->rport;
        fcport->drport = NULL;
        spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
-       if (rport) {
+       if (rport)
                fc_remote_port_delete(rport);
-               /*
-                * Release the target mode FC NEXUS in qla_target.c code
-                * if target mod is enabled.
-                */
-               qlt_fc_port_deleted(vha, fcport);
-       }
 }
 
 /**
@@ -3303,6 +3301,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
         * Create target mode FC NEXUS in qla_target.c if target mode is
         * enabled..
         */
+
        qlt_fc_port_added(vha, fcport);
 
        spin_lock_irqsave(fcport->vha->host->host_lock, flags);
@@ -3341,8 +3340,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 
        if (IS_QLAFX00(vha->hw)) {
                qla2x00_set_fcport_state(fcport, FCS_ONLINE);
-               qla2x00_reg_remote_port(vha, fcport);
-               return;
+               goto reg_port;
        }
        fcport->login_retry = 0;
        fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
@@ -3350,7 +3348,16 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
        qla2x00_set_fcport_state(fcport, FCS_ONLINE);
        qla2x00_iidma_fcport(vha, fcport);
        qla24xx_update_fcport_fcp_prio(vha, fcport);
-       qla2x00_reg_remote_port(vha, fcport);
+
+reg_port:
+       if (qla_ini_mode_enabled(vha))
+               qla2x00_reg_remote_port(vha, fcport);
+       else {
+               /*
+                * Create target mode FC NEXUS in qla_target.c
+                */
+               qlt_fc_port_added(vha, fcport);
+       }
 }
 
 /*
@@ -3375,6 +3382,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
        LIST_HEAD(new_fcports);
        struct qla_hw_data *ha = vha->hw;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+       int             discovery_gen;
 
        /* If FL port exists, then SNS is present */
        if (IS_FWI2_CAPABLE(ha))
@@ -3445,6 +3453,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                        fcport->scan_state = QLA_FCPORT_SCAN;
                }
 
+               /* Mark the time right before querying FW for connected ports.
+                * This process is long and asynchronous, and by the time it is
+                * done the collected information might no longer be accurate:
+                * e.g. a disconnected port might have re-connected and a brand
+                * new session been created. In that case the session's
+                * generation will be newer than discovery_gen. */
+               qlt_do_generation_tick(vha, &discovery_gen);
+
                rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
                if (rval != QLA_SUCCESS)
                        break;
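
The scheme described in the comment is a generation counter: snapshot a monotonically increasing tick before the slow scan starts, stamp each session with the tick current at its creation, and distrust scan verdicts for any session stamped after the snapshot. A minimal sketch, assuming (as the comment implies) that sessions carry such a generation stamp:

/* Illustrative only; the real stamping happens in qla_target.c. */
static void snapshot_discovery_gen(scsi_qla_host_t *vha, int *discovery_gen)
{
	/* Take the snapshot before the long, asynchronous FW query. */
	*discovery_gen = atomic_inc_return(&vha->generation_tick);
}

static bool session_newer_than_scan(int sess_generation, int discovery_gen)
{
	/* Session appeared after the scan began: its state is fresher. */
	return sess_generation > discovery_gen;
}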
@@ -3460,20 +3476,44 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                        if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
                                continue;
 
-                       if (fcport->scan_state == QLA_FCPORT_SCAN &&
-                           atomic_read(&fcport->state) == FCS_ONLINE) {
-                               qla2x00_mark_device_lost(vha, fcport,
-                                   ql2xplogiabsentdevice, 0);
-                               if (fcport->loop_id != FC_NO_LOOP_ID &&
-                                   (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
-                                   fcport->port_type != FCT_INITIATOR &&
-                                   fcport->port_type != FCT_BROADCAST) {
-                                       ha->isp_ops->fabric_logout(vha,
-                                           fcport->loop_id,
-                                           fcport->d_id.b.domain,
-                                           fcport->d_id.b.area,
-                                           fcport->d_id.b.al_pa);
-                                       qla2x00_clear_loop_id(fcport);
+                       if (fcport->scan_state == QLA_FCPORT_SCAN) {
+                               if (qla_ini_mode_enabled(base_vha) &&
+                                   atomic_read(&fcport->state) == FCS_ONLINE) {
+                                       qla2x00_mark_device_lost(vha, fcport,
+                                           ql2xplogiabsentdevice, 0);
+                                       if (fcport->loop_id != FC_NO_LOOP_ID &&
+                                           (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
+                                           fcport->port_type != FCT_INITIATOR &&
+                                           fcport->port_type != FCT_BROADCAST) {
+                                               ha->isp_ops->fabric_logout(vha,
+                                                   fcport->loop_id,
+                                                   fcport->d_id.b.domain,
+                                                   fcport->d_id.b.area,
+                                                   fcport->d_id.b.al_pa);
+                                               qla2x00_clear_loop_id(fcport);
+                                       }
+                               } else if (!qla_ini_mode_enabled(base_vha)) {
+                                       /*
+                                        * In target mode, explicitly kill
+                                        * sessions and log out of devices
+                                        * that are gone, so that we don't
+                                        * end up with an initiator using the
+                                        * wrong ACL (if the fabric recycles
+                                        * an FC address and we have a stale
+                                        * session around) and so that we don't
+                                        * report initiators that are no longer
+                                        * on the fabric.
+                                        */
+                                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf077,
+                                           "port gone, logging out/killing session: "
+                                           "%8phC state 0x%x flags 0x%x fc4_type 0x%x "
+                                           "scan_state %d\n",
+                                           fcport->port_name,
+                                           atomic_read(&fcport->state),
+                                           fcport->flags, fcport->fc4_type,
+                                           fcport->scan_state);
+                                       qlt_fc_port_deleted(vha, fcport,
+                                           discovery_gen);
                                }
                        }
                }
@@ -3494,6 +3534,28 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                            (fcport->flags & FCF_LOGIN_NEEDED) == 0)
                                continue;
 
+                       /*
+                        * If we're not an initiator, skip looking for devices
+                        * and logging in.  There's no reason for us to do it,
+                        * and it seems to actively cause problems in target
+                        * mode if we race with the initiator logging into us
+                        * (we might get the "port ID used" status back from
+                        * our login command and log out the initiator, which
+                        * seems to cause havoc).
+                        */
+                       if (!qla_ini_mode_enabled(base_vha)) {
+                               if (fcport->scan_state == QLA_FCPORT_FOUND) {
+                                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf078,
+                                           "port %8phC state 0x%x flags 0x%x fc4_type 0x%x "
+                                           "scan_state %d (initiator mode disabled; skipping "
+                                           "login)\n", fcport->port_name,
+                                           atomic_read(&fcport->state),
+                                           fcport->flags, fcport->fc4_type,
+                                           fcport->scan_state);
+                               }
+                               continue;
+                       }
+
                        if (fcport->loop_id == FC_NO_LOOP_ID) {
                                fcport->loop_id = next_loopid;
                                rval = qla2x00_find_new_loop_id(
@@ -3520,16 +3582,38 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                            test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
                                break;
 
-                       /* Find a new loop ID to use. */
-                       fcport->loop_id = next_loopid;
-                       rval = qla2x00_find_new_loop_id(base_vha, fcport);
-                       if (rval != QLA_SUCCESS) {
-                               /* Ran out of IDs to use */
-                               break;
-                       }
+                       /*
+                        * If we're not an initiator, skip looking for devices
+                        * and logging in.  There's no reason for us to do it,
+                        * and it seems to actively cause problems in target
+                        * mode if we race with the initiator logging into us
+                        * (we might get the "port ID used" status back from
+                        * our login command and log out the initiator, which
+                        * seems to cause havoc).
+                        */
+                       if (qla_ini_mode_enabled(base_vha)) {
+                               /* Find a new loop ID to use. */
+                               fcport->loop_id = next_loopid;
+                               rval = qla2x00_find_new_loop_id(base_vha,
+                                   fcport);
+                               if (rval != QLA_SUCCESS) {
+                                       /* Ran out of IDs to use */
+                                       break;
+                               }
 
-                       /* Login and update database */
-                       qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
+                               /* Login and update database */
+                               qla2x00_fabric_dev_login(vha, fcport,
+                                   &next_loopid);
+                       } else {
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf079,
+                                       "new port %8phC state 0x%x flags 0x%x fc4_type "
+                                       "0x%x scan_state %d (initiator mode disabled; "
+                                       "skipping login)\n",
+                                       fcport->port_name,
+                                       atomic_read(&fcport->state),
+                                       fcport->flags, fcport->fc4_type,
+                                       fcport->scan_state);
+                       }
 
                        list_move_tail(&fcport->list, &vha->vp_fcports);
                }
@@ -3725,11 +3809,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
                        fcport->fp_speed = new_fcport->fp_speed;
 
                        /*
-                        * If address the same and state FCS_ONLINE, nothing
-                        * changed.
+                        * If the address is the same and the state is
+                        * FCS_ONLINE (or we are in target mode), nothing
+                        * changed.
                         */
                        if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
-                           atomic_read(&fcport->state) == FCS_ONLINE) {
+                           (atomic_read(&fcport->state) == FCS_ONLINE ||
+                            !qla_ini_mode_enabled(base_vha))) {
                                break;
                        }
 
@@ -3749,6 +3834,22 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
                         * Log it out if still logged in and mark it for
                         * relogin later.
                         */
+                       if (!qla_ini_mode_enabled(base_vha)) {
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
+                                        "port changed FC ID, %8phC"
+                                        " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
+                                        fcport->port_name,
+                                        fcport->d_id.b.domain,
+                                        fcport->d_id.b.area,
+                                        fcport->d_id.b.al_pa,
+                                        fcport->loop_id,
+                                        new_fcport->d_id.b.domain,
+                                        new_fcport->d_id.b.area,
+                                        new_fcport->d_id.b.al_pa);
+                               fcport->d_id.b24 = new_fcport->d_id.b24;
+                               break;
+                       }
+
                        fcport->d_id.b24 = new_fcport->d_id.b24;
                        fcport->flags |= FCF_LOGIN_NEEDED;
                        if (fcport->loop_id != FC_NO_LOOP_ID &&
@@ -3768,6 +3869,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
                if (found)
                        continue;
                /* If device was not in our fcports list, then add it. */
+               new_fcport->scan_state = QLA_FCPORT_FOUND;
                list_add_tail(&new_fcport->list, new_fcports);
 
                /* Allocate a new replacement fcport. */
@@ -4188,6 +4290,14 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
                            atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
                                spin_unlock_irqrestore(&ha->vport_slock, flags);
                                qla2x00_rport_del(fcport);
+
+                               /*
+                                * Release the target mode FC NEXUS in
+                                * qla_target.c, if target mode is enabled.
+                                */
+                               qlt_fc_port_deleted(vha, fcport,
+                                   base_vha->total_fcport_update_gen);
+
                                spin_lock_irqsave(&ha->vport_slock, flags);
                        }
                }
index 36fbd4c7af8f50e52cd0289bc31d9ccfca8bbe24..6f02b26a35cff5b06916273a0bf1c8643f523689 100644 (file)
@@ -1943,6 +1943,9 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
        logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
        logio->control_flags =
            cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
+       if (!sp->fcport->tgt_session ||
+           !sp->fcport->tgt_session->keep_nport_handle)
+               logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
        logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
        logio->port_id[0] = sp->fcport->d_id.b.al_pa;
        logio->port_id[1] = sp->fcport->d_id.b.area;
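
Restated as a sketch: the new control-flags logic frees the firmware's N_Port
handle on logout unless a target session has asked to keep it for a pending
PLOGI. Field names are as in the patch; the helper itself is hypothetical:

static inline bool free_nport_on_logout(const fc_port_t *fcport)
{
        /* keep the handle only when a target session explicitly asks for it */
        return !fcport->tgt_session ||
               !fcport->tgt_session->keep_nport_handle;
}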
index 02b1c1c5355b948db22e1e69d2ae513387d198fa..b2f713ad90346093b40b515b70be4062666c6b80 100644 (file)
@@ -2415,7 +2415,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
                        *orig_iocb_cnt = mcp->mb[10];
                if (vha->hw->flags.npiv_supported && max_npiv_vports)
                        *max_npiv_vports = mcp->mb[11];
-               if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) && max_fcfs)
+               if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) ||
+                   IS_QLA27XX(vha->hw)) && max_fcfs)
                        *max_fcfs = mcp->mb[12];
        }
 
@@ -3898,7 +3899,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
        spin_lock_irqsave(&ha->hardware_lock, flags);
        if (!(rsp->options & BIT_0)) {
                WRT_REG_DWORD(rsp->rsp_q_out, 0);
-               if (!IS_QLA83XX(ha))
+               if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
                        WRT_REG_DWORD(rsp->rsp_q_in, 0);
        }
 
@@ -5345,7 +5346,7 @@ qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
        mbx_cmd_t *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
 
-       if (!IS_QLA83XX(ha))
+       if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
                return QLA_FUNCTION_FAILED;
 
        ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
index a28815b8276f090901498dafaf39d12c97db42eb..8a5cac8448c76518107d95744b39ea76717958f5 100644 (file)
@@ -2504,6 +2504,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                ha->mbx_count = MAILBOX_REGISTER_COUNT;
                req_length = REQUEST_ENTRY_CNT_24XX;
                rsp_length = RESPONSE_ENTRY_CNT_2300;
+               ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
                ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
                ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
                ha->gid_list_info_size = 8;
@@ -3229,11 +3230,15 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
                spin_lock_irqsave(vha->host->host_lock, flags);
                fcport->drport = rport;
                spin_unlock_irqrestore(vha->host->host_lock, flags);
+               qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen);
                set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
                qla2xxx_wake_dpc(base_vha);
        } else {
-               fc_remote_port_delete(rport);
-               qlt_fc_port_deleted(vha, fcport);
+               int now;
+               if (rport)
+                       fc_remote_port_delete(rport);
+               qlt_do_generation_tick(vha, &now);
+               qlt_fc_port_deleted(vha, fcport, now);
        }
 }
 
@@ -3763,8 +3768,11 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
        INIT_LIST_HEAD(&vha->vp_fcports);
        INIT_LIST_HEAD(&vha->work_list);
        INIT_LIST_HEAD(&vha->list);
+       INIT_LIST_HEAD(&vha->qla_cmd_list);
+       INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
 
        spin_lock_init(&vha->work_lock);
+       spin_lock_init(&vha->cmd_list_lock);
 
        sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
        ql_dbg(ql_dbg_init, vha, 0x0041,
index 028e8c8a7de9a897ac68ad8c09a2d47be0276f5c..2feb5f38edcd98ec3607f18a020d1cedee5bfc40 100644 (file)
@@ -1697,7 +1697,7 @@ qla83xx_select_led_port(struct qla_hw_data *ha)
 {
        uint32_t led_select_value = 0;
 
-       if (!IS_QLA83XX(ha))
+       if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
                goto out;
 
        if (ha->port_no == 0)
index b749026aa592445d70dd51056314ff29965578ca..58651ecbd88c206e0be3b996c007baeaddc8cb19 100644 (file)
@@ -113,6 +113,11 @@ static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
 static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
        struct atio_from_isp *atio, uint16_t status, int qfull);
 static void qlt_disable_vha(struct scsi_qla_host *vha);
+static void qlt_clear_tgt_db(struct qla_tgt *tgt);
+static void qlt_send_notify_ack(struct scsi_qla_host *vha,
+       struct imm_ntfy_from_isp *ntfy,
+       uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
+       uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
 /*
  * Global Variables
  */
@@ -122,6 +127,16 @@ static struct workqueue_struct *qla_tgt_wq;
 static DEFINE_MUTEX(qla_tgt_mutex);
 static LIST_HEAD(qla_tgt_glist);
 
+/* This API intentionally takes dest as a parameter rather than returning an
+ * int value, so the caller cannot forget to issue wmb() after the store. */
+void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
+{
+       scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
+       *dest = atomic_inc_return(&base_vha->generation_tick);
+       /* memory barrier */
+       wmb();
+}
+
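
A minimal sketch of the staleness comparison this tick feeds (used by
qlt_fc_port_deleted() further below); the helper name is hypothetical:

static bool deletion_request_is_stale(int req_gen, int sess_gen)
{
        /*
         * Signed subtraction keeps the comparison correct across 32-bit
         * counter wraparound (the kernel builds with -fno-strict-overflow,
         * so the subtraction wraps predictably), provided the two values
         * are less than 2^31 ticks apart.
         */
        return req_gen - sess_gen < 0; /* session is newer than the request */
}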
 /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
 static struct qla_tgt_sess *qlt_find_sess_by_port_name(
        struct qla_tgt *tgt,
@@ -381,14 +396,73 @@ static void qlt_free_session_done(struct work_struct *work)
        struct qla_tgt *tgt = sess->tgt;
        struct scsi_qla_host *vha = sess->vha;
        struct qla_hw_data *ha = vha->hw;
+       unsigned long flags;
+       bool logout_started = false;
+       fc_port_t fcport;
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
+               "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
+               " s_id %02x:%02x:%02x logout %d keep %d plogi %d\n",
+               __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
+               sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
+               sess->logout_on_delete, sess->keep_nport_handle,
+               sess->plogi_ack_needed);
 
        BUG_ON(!tgt);
+
+       if (sess->logout_on_delete) {
+               int rc;
+
+               memset(&fcport, 0, sizeof(fcport));
+               fcport.loop_id = sess->loop_id;
+               fcport.d_id = sess->s_id;
+               memcpy(fcport.port_name, sess->port_name, WWN_SIZE);
+               fcport.vha = vha;
+               fcport.tgt_session = sess;
+
+               rc = qla2x00_post_async_logout_work(vha, &fcport, NULL);
+               if (rc != QLA_SUCCESS)
+                       ql_log(ql_log_warn, vha, 0xf085,
+                              "Schedule logo failed sess %p rc %d\n",
+                              sess, rc);
+               else
+                       logout_started = true;
+       }
+
        /*
         * Release the target session for FC Nexus from fabric module code.
         */
        if (sess->se_sess != NULL)
                ha->tgt.tgt_ops->free_session(sess);
 
+       if (logout_started) {
+               bool traced = false;
+
+               while (!ACCESS_ONCE(sess->logout_completed)) {
+                       if (!traced) {
+                               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
+                                       "%s: waiting for sess %p logout\n",
+                                       __func__, sess);
+                               traced = true;
+                       }
+                       msleep(100);
+               }
+
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
+                       "%s: sess %p logout completed\n",
+                       __func__, sess);
+       }
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+
+       if (sess->plogi_ack_needed)
+               qlt_send_notify_ack(vha, &sess->tm_iocb,
+                                   0, 0, 0, 0, 0, 0);
+
+       list_del(&sess->sess_list_entry);
+
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
            "Unregistration of sess %p finished\n", sess);
 
@@ -409,9 +483,9 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
 
        vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
 
-       list_del(&sess->sess_list_entry);
-       if (sess->deleted)
-               list_del(&sess->del_list_entry);
+       if (!list_empty(&sess->del_list_entry))
+               list_del_init(&sess->del_list_entry);
+       sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
 
        INIT_WORK(&sess->free_work, qlt_free_session_done);
        schedule_work(&sess->free_work);
@@ -431,10 +505,10 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
 
        loop_id = le16_to_cpu(n->u.isp24.nport_handle);
        if (loop_id == 0xFFFF) {
-#if 0 /* FIXME: Re-enable Global event handling.. */
                /* Global event */
-               atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
-               qlt_clear_tgt_db(ha->tgt.qla_tgt);
+               atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
+               qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
+#if 0 /* FIXME: do we need to choose a session here? */
                if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
                        sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
                            typeof(*sess), sess_list_entry);
@@ -489,27 +563,38 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
        struct qla_tgt *tgt = sess->tgt;
        uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
 
-       if (sess->deleted)
-               return;
+       if (sess->deleted) {
+               /* Upgrade to unconditional deletion in case it was temporary */
+               if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING)
+                       list_del(&sess->del_list_entry);
+               else
+                       return;
+       }
 
        ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
            "Scheduling sess %p for deletion\n", sess);
-       list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
-       sess->deleted = 1;
 
-       if (immediate)
+       if (immediate) {
                dev_loss_tmo = 0;
+               sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+               list_add(&sess->del_list_entry, &tgt->del_sess_list);
+       } else {
+               sess->deleted = QLA_SESS_DELETION_PENDING;
+               list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
+       }
 
        sess->expires = jiffies + dev_loss_tmo * HZ;
 
        ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
-           "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
-           "deletion in %u secs (expires: %lu) immed: %d\n",
-           sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
-           sess->expires, immediate);
+           "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)"
+           " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n",
+           sess->vha->vp_idx, sess->port_name, sess->loop_id,
+           sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
+           dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete,
+           sess->generation);
 
        if (immediate)
-               schedule_delayed_work(&tgt->sess_del_work, 0);
+               mod_delayed_work(system_wq, &tgt->sess_del_work, 0);
        else
                schedule_delayed_work(&tgt->sess_del_work,
                    sess->expires - jiffies);
@@ -578,9 +663,9 @@ out_free_id_list:
 /* ha->hardware_lock supposed to be held on entry */
 static void qlt_undelete_sess(struct qla_tgt_sess *sess)
 {
-       BUG_ON(!sess->deleted);
+       BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);
 
-       list_del(&sess->del_list_entry);
+       list_del_init(&sess->del_list_entry);
        sess->deleted = 0;
 }
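
The patch splits sess->deleted into two stages. A sketch of the states as used
throughout this file (the real definitions live in qla_target.h; the numeric
values here are assumed):

enum {
        /* sess->deleted == 0: session is live */
        QLA_SESS_DELETION_PENDING = 1,     /* on del_sess_list; qlt_undelete_sess() may still revive it */
        QLA_SESS_DELETION_IN_PROGRESS = 2, /* point of no return: free_work is on its way */
};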
 
@@ -599,7 +684,9 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
                    del_list_entry);
                elapsed = jiffies;
                if (time_after_eq(elapsed, sess->expires)) {
-                       qlt_undelete_sess(sess);
+                       /* No turning back */
+                       list_del_init(&sess->del_list_entry);
+                       sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
 
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
                            "Timeout: sess %p about to be deleted\n",
@@ -643,6 +730,13 @@ static struct qla_tgt_sess *qlt_create_sess(
                            fcport->d_id.b.al_pa, fcport->d_id.b.area,
                            fcport->loop_id);
 
+                       /* Cannot undelete at this point */
+                       if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+                               spin_unlock_irqrestore(&ha->hardware_lock,
+                                   flags);
+                               return NULL;
+                       }
+
                        if (sess->deleted)
                                qlt_undelete_sess(sess);
 
@@ -652,6 +746,9 @@ static struct qla_tgt_sess *qlt_create_sess(
 
                        if (sess->local && !local)
                                sess->local = 0;
+
+                       qlt_do_generation_tick(vha, &sess->generation);
+
                        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
                        return sess;
@@ -673,6 +770,14 @@ static struct qla_tgt_sess *qlt_create_sess(
        sess->s_id = fcport->d_id;
        sess->loop_id = fcport->loop_id;
        sess->local = local;
+       INIT_LIST_HEAD(&sess->del_list_entry);
+
+       /* Under normal circumstances we want to log out from the firmware when
+        * the session eventually ends and release the corresponding nport
+        * handle. In the exceptional cases (e.g. when a new PLOGI is waiting)
+        * the corresponding code will adjust these flags as necessary. */
+       sess->logout_on_delete = 1;
+       sess->keep_nport_handle = 0;
 
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
            "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
@@ -705,6 +810,7 @@ static struct qla_tgt_sess *qlt_create_sess(
        spin_lock_irqsave(&ha->hardware_lock, flags);
        list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
        vha->vha_tgt.qla_tgt->sess_count++;
+       qlt_do_generation_tick(vha, &sess->generation);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
@@ -718,7 +824,7 @@ static struct qla_tgt_sess *qlt_create_sess(
 }
 
 /*
- * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
+ * Called from qla2x00_reg_remote_port()
  */
 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
@@ -750,6 +856,10 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
                mutex_unlock(&vha->vha_tgt.tgt_mutex);
 
                spin_lock_irqsave(&ha->hardware_lock, flags);
+       } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+               /* Point of no return */
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+               return;
        } else {
                kref_get(&sess->se_sess->sess_kref);
 
@@ -780,27 +890,36 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
-void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
+/*
+ * max_gen - specifies the maximum session generation
+ * at which this deletion request is still valid
+ */
+void
+qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
 {
-       struct qla_hw_data *ha = vha->hw;
        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        struct qla_tgt_sess *sess;
-       unsigned long flags;
 
        if (!vha->hw->tgt.tgt_ops)
                return;
 
-       if (!tgt || (fcport->port_type != FCT_INITIATOR))
+       if (!tgt)
                return;
 
-       spin_lock_irqsave(&ha->hardware_lock, flags);
        if (tgt->tgt_stop) {
-               spin_unlock_irqrestore(&ha->hardware_lock, flags);
                return;
        }
        sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
        if (!sess) {
-               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+               return;
+       }
+
+       if (max_gen - sess->generation < 0) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
+                   "Ignoring stale deletion request for se_sess %p / sess %p"
+                   " for port %8phC, req_gen %d, sess_gen %d\n",
+                   sess->se_sess, sess, sess->port_name, max_gen,
+                   sess->generation);
                return;
        }
 
@@ -808,7 +927,6 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
 
        sess->local = 1;
        qlt_schedule_sess_for_deletion(sess, false);
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
 static inline int test_tgt_sess_count(struct qla_tgt *tgt)
@@ -1175,6 +1293,70 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
            FCP_TMF_CMPL, true);
 }
 
+static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
+{
+       struct qla_tgt_sess_op *op;
+       struct qla_tgt_cmd *cmd;
+
+       spin_lock(&vha->cmd_list_lock);
+
+       list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
+               if (tag == op->atio.u.isp24.exchange_addr) {
+                       op->aborted = true;
+                       spin_unlock(&vha->cmd_list_lock);
+                       return 1;
+               }
+       }
+
+       list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
+               if (tag == cmd->atio.u.isp24.exchange_addr) {
+                       cmd->state = QLA_TGT_STATE_ABORTED;
+                       spin_unlock(&vha->cmd_list_lock);
+                       return 1;
+               }
+       }
+
+       spin_unlock(&vha->cmd_list_lock);
+       return 0;
+}
+
+/* drop cmds for the given lun
+ * XXX only looks for cmds on the port through which the lun reset was received
+ * XXX does not go through the lists of other ports (which may have cmds
+ *     for the same lun)
+ */
+static void abort_cmds_for_lun(struct scsi_qla_host *vha,
+                               uint32_t lun, uint8_t *s_id)
+{
+       struct qla_tgt_sess_op *op;
+       struct qla_tgt_cmd *cmd;
+       uint32_t key;
+
+       key = sid_to_key(s_id);
+       spin_lock(&vha->cmd_list_lock);
+       list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
+               uint32_t op_key;
+               uint32_t op_lun;
+
+               op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+               op_lun = scsilun_to_int(
+                       (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
+               if (op_key == key && op_lun == lun)
+                       op->aborted = true;
+       }
+       list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
+               uint32_t cmd_key;
+               uint32_t cmd_lun;
+
+               cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
+               cmd_lun = scsilun_to_int(
+                       (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
+               if (cmd_key == key && cmd_lun == lun)
+                       cmd->state = QLA_TGT_STATE_ABORTED;
+       }
+       spin_unlock(&vha->cmd_list_lock);
+}
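
Both abort helpers key commands by their 24-bit FC source ID. A sketch of the
packing sid_to_key() is assumed to perform (wire order domain/area/al_pa,
matching the explicit packing in abort_cmds_for_s_id() further below):

static inline uint32_t example_sid_to_key(const uint8_t s_id[3])
{
        /* s_id[0]=domain, s_id[1]=area, s_id[2]=al_pa -> one 24-bit key */
        return ((uint32_t)s_id[0] << 16) |
               ((uint32_t)s_id[1] << 8) |
               (uint32_t)s_id[2];
}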
+
 /* ha->hardware_lock supposed to be held on entry */
 static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
        struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
@@ -1199,8 +1381,19 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
        }
        spin_unlock(&se_sess->sess_cmd_lock);
 
-       if (!found_lun)
-               return -ENOENT;
+       /* cmd not in LIO lists, look in qla list */
+       if (!found_lun) {
+               if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
+                       /* send TASK_ABORT response immediately */
+                       qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
+                       return 0;
+               } else {
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
+                           "unable to find cmd in driver or LIO for tag 0x%x\n",
+                           abts->exchange_addr_to_abort);
+                       return -ENOENT;
+               }
+       }
 
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
            "qla_target(%d): task abort (tag=%d)\n",
@@ -1284,6 +1477,11 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
                return;
        }
 
+       if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+               qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+               return;
+       }
+
        rc = __qlt_24xx_handle_abts(vha, abts, sess);
        if (rc != 0) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
@@ -1726,20 +1924,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
        struct qla_hw_data *ha = vha->hw;
        struct se_cmd *se_cmd = &cmd->se_cmd;
 
-       if (unlikely(cmd->aborted)) {
-               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
-                      "qla_target(%d): terminating exchange for aborted cmd=%p (se_cmd=%p, tag=%lld)",
-                      vha->vp_idx, cmd, se_cmd, se_cmd->tag);
-
-               cmd->state = QLA_TGT_STATE_ABORTED;
-               cmd->cmd_flags |= BIT_6;
-
-               qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
-
-               /* !! At this point cmd could be already freed !! */
-               return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
-       }
-
        prm->cmd = cmd;
        prm->tgt = tgt;
        prm->rq_result = scsi_status;
@@ -2301,6 +2485,19 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
        unsigned long flags = 0;
        int res;
 
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+               cmd->state = QLA_TGT_STATE_PROCESSED;
+               if (cmd->sess->logout_completed)
+                       /* no need to terminate. FW already freed exchange. */
+                       qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+               else
+                       qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+               return 0;
+       }
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
        memset(&prm, 0, sizeof(prm));
        qlt_check_srr_debug(cmd, &xmit_type);
 
@@ -2313,9 +2510,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
        res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
            &full_req_cnt);
        if (unlikely(res != 0)) {
-               if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
-                       return 0;
-
                return res;
        }
 
@@ -2345,9 +2539,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
                res = qlt_build_ctio_crc2_pkt(&prm, vha);
        else
                res = qlt_24xx_build_ctio_pkt(&prm, vha);
-       if (unlikely(res != 0))
+       if (unlikely(res != 0)) {
+               vha->req->cnt += full_req_cnt;
                goto out_unmap_unlock;
-
+       }
 
        pkt = (struct ctio7_to_24xx *)prm.pkt;
 
@@ -2461,7 +2656,8 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
 
-       if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
+       if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) ||
+           (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
                /*
                 * Either a chip reset is active or this request was from
                 * previous life, just abort the processing.
@@ -2485,8 +2681,11 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
        else
                res = qlt_24xx_build_ctio_pkt(&prm, vha);
 
-       if (unlikely(res != 0))
+       if (unlikely(res != 0)) {
+               vha->req->cnt += prm.req_cnt;
                goto out_unlock_free_unmap;
+       }
+
        pkt = (struct ctio7_to_24xx *)prm.pkt;
        pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
            CTIO7_FLAGS_STATUS_MODE_0);
@@ -2649,6 +2848,89 @@ out:
 }
 
 
+/* If hardware_lock held on entry, might drop it, then reacquire */
+/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
+static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
+       struct imm_ntfy_from_isp *ntfy)
+{
+       struct nack_to_isp *nack;
+       struct qla_hw_data *ha = vha->hw;
+       request_t *pkt;
+       int ret = 0;
+
+       ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
+           "Sending TERM ELS CTIO (ha=%p)\n", ha);
+
+       pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
+       if (pkt == NULL) {
+               ql_dbg(ql_dbg_tgt, vha, 0xe080,
+                   "qla_target(%d): %s failed: unable to allocate "
+                   "request packet\n", vha->vp_idx, __func__);
+               return -ENOMEM;
+       }
+
+       pkt->entry_type = NOTIFY_ACK_TYPE;
+       pkt->entry_count = 1;
+       pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+
+       nack = (struct nack_to_isp *)pkt;
+       nack->ox_id = ntfy->ox_id;
+
+       nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
+       if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
+               nack->u.isp24.flags = ntfy->u.isp24.flags &
+                       __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+       }
+
+       /* terminate */
+       nack->u.isp24.flags |=
+               __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
+
+       nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
+       nack->u.isp24.status = ntfy->u.isp24.status;
+       nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+       nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
+       nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
+       nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
+       nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
+       nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+
+       qla2x00_start_iocbs(vha, vha->req);
+       return ret;
+}
+
+static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
+       struct imm_ntfy_from_isp *imm, int ha_locked)
+{
+       unsigned long flags = 0;
+       int rc;
+
+       if (qlt_issue_marker(vha, ha_locked) < 0)
+               return;
+
+       if (ha_locked) {
+               rc = __qlt_send_term_imm_notif(vha, imm);
+
+#if 0  /* Todo  */
+               if (rc == -ENOMEM)
+                       qlt_alloc_qfull_cmd(vha, imm, 0, 0);
+#endif
+               goto done;
+       }
+
+       spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+       rc = __qlt_send_term_imm_notif(vha, imm);
+
+#if 0  /* Todo */
+       if (rc == -ENOMEM)
+               qlt_alloc_qfull_cmd(vha, imm, 0, 0);
+#endif
+
+done:
+       if (!ha_locked)
+               spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
+
 /* If hardware_lock held on entry, might drop it, then reacquire */
 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
 static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
@@ -2715,7 +2997,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
 static void qlt_send_term_exchange(struct scsi_qla_host *vha,
        struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
 {
-       unsigned long flags;
+       unsigned long flags = 0;
        int rc;
 
        if (qlt_issue_marker(vha, ha_locked) < 0)
@@ -2731,17 +3013,18 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
        rc = __qlt_send_term_exchange(vha, cmd, atio);
        if (rc == -ENOMEM)
                qlt_alloc_qfull_cmd(vha, atio, 0, 0);
-       spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
 
 done:
        if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
            !cmd->cmd_sent_to_fw)) {
-               if (!ha_locked && !in_interrupt())
-                       msleep(250); /* just in case */
-
-               qlt_unmap_sg(vha, cmd);
+               if (cmd->sg_mapped)
+                       qlt_unmap_sg(vha, cmd);
                vha->hw->tgt.tgt_ops->free_cmd(cmd);
        }
+
+       if (!ha_locked)
+               spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
        return;
 }
 
@@ -2792,6 +3075,24 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
 
 }
 
+void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
+{
+       struct qla_tgt *tgt = cmd->tgt;
+       struct scsi_qla_host *vha = tgt->vha;
+       struct se_cmd *se_cmd = &cmd->se_cmd;
+
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
+           "qla_target(%d): terminating exchange for aborted cmd=%p "
+           "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
+           se_cmd->tag);
+
+       cmd->state = QLA_TGT_STATE_ABORTED;
+       cmd->cmd_flags |= BIT_6;
+
+       qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
+}
+EXPORT_SYMBOL(qlt_abort_cmd);
+
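
qlt_abort_cmd() is exported so the fabric module can terminate the exchange
itself once the core declares a command aborted. A hypothetical caller sketch
(the actual tcm_qla2xxx callback may differ):

static void example_aborted_task(struct se_cmd *se_cmd)
{
        struct qla_tgt_cmd *cmd =
                container_of(se_cmd, struct qla_tgt_cmd, se_cmd);

        /* terminate the FC exchange instead of sending a response */
        qlt_abort_cmd(cmd);
}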
 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
 {
        struct qla_tgt_sess *sess = cmd->sess;
@@ -3015,7 +3316,7 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
                dump_stack();
        }
 
-       cmd->cmd_flags |= BIT_12;
+       cmd->cmd_flags |= BIT_17;
        ha->tgt.tgt_ops->free_cmd(cmd);
 }
 
@@ -3177,7 +3478,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
 skip_term:
 
        if (cmd->state == QLA_TGT_STATE_PROCESSED) {
-               ;
+               cmd->cmd_flags |= BIT_12;
        } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
                int rx_status = 0;
 
@@ -3191,9 +3492,11 @@ skip_term:
                ha->tgt.tgt_ops->handle_data(cmd);
                return;
        } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
+               cmd->cmd_flags |= BIT_18;
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
                  "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
        } else {
+               cmd->cmd_flags |= BIT_19;
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
                    "qla_target(%d): A command in state (%d) should "
                    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
@@ -3205,7 +3508,6 @@ skip_term:
                dump_stack();
        }
 
-
        ha->tgt.tgt_ops->free_cmd(cmd);
 }
 
@@ -3263,6 +3565,13 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
        if (tgt->tgt_stop)
                goto out_term;
 
+       if (cmd->state == QLA_TGT_STATE_ABORTED) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
+                   "cmd with tag %u is aborted\n",
+                   cmd->atio.u.isp24.exchange_addr);
+               goto out_term;
+       }
+
        cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
        cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
        cmd->unpacked_lun = scsilun_to_int(
@@ -3316,6 +3625,12 @@ out_term:
 static void qlt_do_work(struct work_struct *work)
 {
        struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+       scsi_qla_host_t *vha = cmd->vha;
+       unsigned long flags;
+
+       spin_lock_irqsave(&vha->cmd_list_lock, flags);
+       list_del(&cmd->cmd_list);
+       spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
 
        __qlt_do_work(cmd);
 }
@@ -3345,6 +3660,11 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
        cmd->loop_id = sess->loop_id;
        cmd->conf_compl_supported = sess->conf_compl_supported;
 
+       cmd->cmd_flags = 0;
+       cmd->jiffies_at_alloc = get_jiffies_64();
+
+       cmd->reset_count = vha->hw->chip_reset;
+
        return cmd;
 }
 
@@ -3362,14 +3682,25 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
        unsigned long flags;
        uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
 
+       spin_lock_irqsave(&vha->cmd_list_lock, flags);
+       list_del(&op->cmd_list);
+       spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+
+       if (op->aborted) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
+                   "sess_op with tag %u is aborted\n",
+                   op->atio.u.isp24.exchange_addr);
+               goto out_term;
+       }
+
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
-               "qla_target(%d): Unable to find wwn login"
-               " (s_id %x:%x:%x), trying to create it manually\n",
-               vha->vp_idx, s_id[0], s_id[1], s_id[2]);
+           "qla_target(%d): Unable to find wwn login"
+           " (s_id %x:%x:%x), trying to create it manually\n",
+           vha->vp_idx, s_id[0], s_id[1], s_id[2]);
 
        if (op->atio.u.raw.entry_count > 1) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
-                       "Dropping multy entry atio %p\n", &op->atio);
+                   "Dropping multi entry atio %p\n", &op->atio);
                goto out_term;
        }
 
@@ -3434,10 +3765,25 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
 
                memcpy(&op->atio, atio, sizeof(*atio));
                op->vha = vha;
+
+               spin_lock(&vha->cmd_list_lock);
+               list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
+               spin_unlock(&vha->cmd_list_lock);
+
                INIT_WORK(&op->work, qlt_create_sess_from_atio);
                queue_work(qla_tgt_wq, &op->work);
                return 0;
        }
+
+       /* Another WWN used to have our s_id. Our PLOGI scheduled its
+        * session deletion, but it's still in the sess_del_work wq. */
+       if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+               ql_dbg(ql_dbg_io, vha, 0x3061,
+                   "New command while old session %p is being deleted\n",
+                   sess);
+               return -EFAULT;
+       }
+
        /*
         * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
         */
@@ -3451,13 +3797,13 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
                return -ENOMEM;
        }
 
-       cmd->cmd_flags = 0;
-       cmd->jiffies_at_alloc = get_jiffies_64();
-
-       cmd->reset_count = vha->hw->chip_reset;
-
        cmd->cmd_in_wq = 1;
        cmd->cmd_flags |= BIT_0;
+
+       spin_lock(&vha->cmd_list_lock);
+       list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
+       spin_unlock(&vha->cmd_list_lock);
+
        INIT_WORK(&cmd->work, qlt_do_work);
        queue_work(qla_tgt_wq, &cmd->work);
        return 0;
@@ -3471,6 +3817,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
        struct scsi_qla_host *vha = sess->vha;
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt_mgmt_cmd *mcmd;
+       struct atio_from_isp *a = (struct atio_from_isp *)iocb;
        int res;
        uint8_t tmr_func;
 
@@ -3511,6 +3858,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
                ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
                    "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
                tmr_func = TMR_LUN_RESET;
+               abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
                break;
 
        case QLA_TGT_CLEAR_TS:
@@ -3599,6 +3947,9 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
                    sizeof(struct atio_from_isp));
        }
 
+       if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)
+               return -EFAULT;
+
        return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
 }
 
@@ -3664,22 +4015,280 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
        return __qlt_abort_task(vha, iocb, sess);
 }
 
+void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
+{
+       if (fcport->tgt_session) {
+               if (rc != MBS_COMMAND_COMPLETE) {
+                       ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
+                               "%s: se_sess %p / sess %p from"
+                               " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
+                               " LOGO failed: %#x\n",
+                               __func__,
+                               fcport->tgt_session->se_sess,
+                               fcport->tgt_session,
+                               fcport->port_name, fcport->loop_id,
+                               fcport->d_id.b.domain, fcport->d_id.b.area,
+                               fcport->d_id.b.al_pa, rc);
+               }
+
+               fcport->tgt_session->logout_completed = 1;
+       }
+}
+
+static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a,
+    struct imm_ntfy_from_isp *b)
+{
+       struct imm_ntfy_from_isp tmp;
+       memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp));
+       memcpy(a, b, sizeof(struct imm_ntfy_from_isp));
+       memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp));
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
+ *
+ * Schedules sessions with matching port_id/loop_id but different wwn for
+ * deletion. Returns the existing session with matching wwn if present,
+ * NULL otherwise.
+ */
+static struct qla_tgt_sess *
+qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
+    port_id_t port_id, uint16_t loop_id)
+{
+       struct qla_tgt_sess *sess = NULL, *other_sess;
+       uint64_t other_wwn;
+
+       list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {
+
+               other_wwn = wwn_to_u64(other_sess->port_name);
+
+               if (wwn == other_wwn) {
+                       WARN_ON(sess);
+                       sess = other_sess;
+                       continue;
+               }
+
+               /* find other sess with nport_id collision */
+               if (port_id.b24 == other_sess->s_id.b24) {
+                       if (loop_id != other_sess->loop_id) {
+                               ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c,
+                                   "Invalidating sess %p loop_id %d wwn %llx.\n",
+                                   other_sess, other_sess->loop_id, other_wwn);
+
+                               /*
+                                * logout_on_delete is set by default, but another
+                                * session that has the same s_id/loop_id combo
+                                * might have cleared it when it requested this
+                                * session's deletion, so don't touch it
+                                */
+                               qlt_schedule_sess_for_deletion(other_sess, true);
+                       } else {
+                               /*
+                                * Another wwn used to have our s_id/loop_id
+                                * combo - kill the session, but don't log out
+                                */
+                               sess->logout_on_delete = 0;
+                               qlt_schedule_sess_for_deletion(other_sess,
+                                   true);
+                       }
+                       continue;
+               }
+
+               /* find other sess with nport handle collision */
+               if (loop_id == other_sess->loop_id) {
+                       ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d,
+                              "Invalidating sess %p loop_id %d wwn %llx.\n",
+                              other_sess, other_sess->loop_id, other_wwn);
+
+                       /* Same loop_id but different s_id:
+                        * OK to kill and log out */
+                       qlt_schedule_sess_for_deletion(other_sess, true);
+               }
+       }
+
+       return sess;
+}
+
+/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
+static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
+{
+       struct qla_tgt_sess_op *op;
+       struct qla_tgt_cmd *cmd;
+       uint32_t key;
+       int count = 0;
+
+       key = (((u32)s_id->b.domain << 16) |
+              ((u32)s_id->b.area   <<  8) |
+              ((u32)s_id->b.al_pa));
+
+       spin_lock(&vha->cmd_list_lock);
+       list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
+               uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+               if (op_key == key) {
+                       op->aborted = true;
+                       count++;
+               }
+       }
+       list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
+               uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
+               if (cmd_key == key) {
+                       cmd->state = QLA_TGT_STATE_ABORTED;
+                       count++;
+               }
+       }
+       spin_unlock(&vha->cmd_list_lock);
+
+       return count;
+}
+
 /*
  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
  */
 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
        struct imm_ntfy_from_isp *iocb)
 {
+       struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_sess *sess = NULL;
+       uint64_t wwn;
+       port_id_t port_id;
+       uint16_t loop_id;
+       uint16_t wd3_lo;
        int res = 0;
 
+       wwn = wwn_to_u64(iocb->u.isp24.port_name);
+
+       port_id.b.domain = iocb->u.isp24.port_id[2];
+       port_id.b.area   = iocb->u.isp24.port_id[1];
+       port_id.b.al_pa  = iocb->u.isp24.port_id[0];
+       port_id.b.rsvd_1 = 0;
+
+       loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
+
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
            "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
            vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
 
+       /* res = 1 means ack at the end of this thread;
+        * res = 0 means ack async/later.
+        */
        switch (iocb->u.isp24.status_subcode) {
        case ELS_PLOGI:
-       case ELS_FLOGI:
+
+               /* Mark all stale commands in qla_tgt_wq for deletion */
+               abort_cmds_for_s_id(vha, &port_id);
+
+               if (wwn)
+                       sess = qlt_find_sess_invalidate_other(tgt, wwn,
+                           port_id, loop_id);
+
+               if (!sess || IS_SW_RESV_ADDR(sess->s_id)) {
+                       res = 1;
+                       break;
+               }
+
+               if (sess->plogi_ack_needed) {
+                       /*
+                        * Initiator sent another PLOGI before the last PLOGI
+                        * could finish. Swap the plogi iocbs and terminate the
+                        * old one without acking; the new one will get acked
+                        * when session deletion completes.
+                        */
+                       ql_log(ql_log_warn, sess->vha, 0xf094,
+                           "sess %p received double plogi.\n", sess);
+
+                       qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb);
+
+                       qlt_send_term_imm_notif(vha, iocb, 1);
+
+                       res = 0;
+                       break;
+               }
+
+               res = 0;
+
+               /*
+                * Save immediate Notif IOCB for Ack when sess is done
+                * and being deleted.
+                */
+               memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb));
+               sess->plogi_ack_needed  = 1;
+
+               /*
+                * Under normal circumstances we want to release the nport
+                * handle during the LOGO process to avoid nport handle leaks
+                * inside FW. The exception is when LOGO is done while another
+                * PLOGI with the same nport handle is waiting, as might be the
+                * case here. Note: there is always a possibility of a race
+                * where session deletion has already started for other reasons
+                * (e.g. ACL removal) and now PLOGI arrives:
+                * 1. if PLOGI arrived in FW after the nport handle has been
+                *    freed, FW must have assigned this PLOGI a new/same handle
+                *    and we can proceed ACK'ing it as usual when session
+                *    deletion completes.
+                * 2. if PLOGI arrived in FW before the LOGO with LCF_FREE_NPORT
+                *    bit reached it, the handle has now been released. We'll
+                *    get an error when we ACK this PLOGI. Nothing will be sent
+                *    back to the initiator. The initiator should eventually
+                *    retry PLOGI and the situation will correct itself.
+                */
+               sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
+                                          (sess->s_id.b24 == port_id.b24));
+               qlt_schedule_sess_for_deletion(sess, true);
+               break;
+
        case ELS_PRLI:
+               wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
+
+               if (wwn)
+                       sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
+                           loop_id);
+
+               if (sess != NULL) {
+                       if (sess->deleted) {
+                               /*
+                                * Impatient initiator sent PRLI before the last
+                                * PLOGI could finish. Force it to retry while
+                                * the previous one finishes.
+                                */
+                               ql_log(ql_log_warn, sess->vha, 0xf095,
+                                   "sess %p PRLI received, before plogi ack.\n",
+                                   sess);
+                               qlt_send_term_imm_notif(vha, iocb, 1);
+                               res = 0;
+                               break;
+                       }
+
+                       /*
+                        * This shouldn't happen under normal circumstances,
+                        * since we have deleted the old session during PLOGI
+                        */
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
+                           "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
+                           sess->loop_id, sess, iocb->u.isp24.nport_handle);
+
+                       sess->local = 0;
+                       sess->loop_id = loop_id;
+                       sess->s_id = port_id;
+
+                       if (wd3_lo & BIT_7)
+                               sess->conf_compl_supported = 1;
+
+               }
+               res = 1; /* send notify ack */
+
+               /* Make session global (not used in fabric mode) */
+               if (ha->current_topology != ISP_CFG_F) {
+                       set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+                       set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+                       qla2xxx_wake_dpc(vha);
+               } else {
+                       /* TODO: create the session here. */
+                       res = 1; /* send notify ack */
+               }
+
+               break;
+
        case ELS_LOGO:
        case ELS_PRLO:
                res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
@@ -3697,6 +4306,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
                break;
        }
 
+       case ELS_FLOGI: /* should never happen */
        default:
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
                    "qla_target(%d): Unsupported ELS command %x "
@@ -5012,6 +5622,11 @@ static void qlt_abort_work(struct qla_tgt *tgt,
                if (!sess)
                        goto out_term;
        } else {
+               if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+                       sess = NULL;
+                       goto out_term;
+               }
+
                kref_get(&sess->se_sess->sess_kref);
        }
 
@@ -5066,6 +5681,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
                if (!sess)
                        goto out_term;
        } else {
+               if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+                       sess = NULL;
+                       goto out_term;
+               }
+
                kref_get(&sess->se_sess->sess_kref);
        }
 
@@ -5552,6 +6172,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
 
        /* Adjust ring index */
        WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
+       RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
 }
 
 void
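
The RD_REG_DWORD_RELAXED() read-back added after the ring-index write above is the standard way to flush a posted PCI write: a read from the same device cannot complete until earlier posted writes have reached the hardware. A minimal kernel-style sketch of the idiom (helper name hypothetical):

/* Sketch: flush a posted MMIO write by reading the register back. */
static inline void ring_doorbell(void __iomem *reg, u32 index)
{
        writel(index, reg);     /* posted write may linger in a bridge buffer */
        (void)readl(reg);       /* the read forces the write to complete */
}
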
@@ -5793,7 +6414,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
        if (!QLA_TGT_MODE_ENABLED())
                return;
 
-       if  (ha->mqenable || IS_QLA83XX(ha)) {
+       if  (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
                ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
                ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
        } else {
index 985d76dd706b71b3da1b3685169a21c24af90fa0..bca584ae45b7eccc3da35a11c9583c324524a802 100644 (file)
@@ -167,7 +167,24 @@ struct imm_ntfy_from_isp {
                        uint32_t srr_rel_offs;
                        uint16_t srr_ui;
                        uint16_t srr_ox_id;
-                       uint8_t  reserved_4[19];
+                       union {
+                               struct {
+                                       uint8_t node_name[8];
+                               } plogi; /* PLOGI/ADISC/PDISC */
+                               struct {
+                                       /* PRLI word 3 bits 0-15 */
+                                       uint16_t wd3_lo;
+                                       uint8_t resv0[6];
+                               } prli;
+                               struct {
+                                       uint8_t port_id[3];
+                                       uint8_t resv1;
+                                       uint16_t nport_handle;
+                                       uint16_t resv2;
+                               } req_els;
+                       } u;
+                       uint8_t port_name[8];
+                       uint8_t resv3[3];
                        uint8_t  vp_index;
                        uint32_t reserved_5;
                        uint8_t  port_id[3];
@@ -234,6 +251,7 @@ struct nack_to_isp {
        uint8_t  reserved[2];
        uint16_t ox_id;
 } __packed;
+#define NOTIFY_ACK_FLAGS_TERMINATE     BIT_3
 #define NOTIFY_ACK_SRR_FLAGS_ACCEPT    0
 #define NOTIFY_ACK_SRR_FLAGS_REJECT    1
 
@@ -790,13 +808,6 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
 #define        FC_TM_REJECT                4
 #define FC_TM_FAILED                5
 
-/*
- * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was
- * terminated, so no more actions is needed and success should be returned
- * to target.
- */
-#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED      0x1717
-
 #if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
 #define pci_dma_lo32(a) (a & 0xffffffff)
 #define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
@@ -874,6 +885,15 @@ struct qla_tgt_sess_op {
        struct scsi_qla_host *vha;
        struct atio_from_isp atio;
        struct work_struct work;
+       struct list_head cmd_list;
+       bool aborted;
+};
+
+enum qla_sess_deletion {
+       QLA_SESS_DELETION_NONE          = 0,
+       QLA_SESS_DELETION_PENDING       = 1, /* hopefully we can get rid of
+                                             * this one */
+       QLA_SESS_DELETION_IN_PROGRESS   = 2,
 };
 
 /*
@@ -884,8 +904,15 @@ struct qla_tgt_sess {
        port_id_t s_id;
 
        unsigned int conf_compl_supported:1;
-       unsigned int deleted:1;
+       unsigned int deleted:2;
        unsigned int local:1;
+       unsigned int logout_on_delete:1;
+       unsigned int plogi_ack_needed:1;
+       unsigned int keep_nport_handle:1;
+
+       unsigned char logout_completed;
+
+       int generation;
 
        struct se_session *se_sess;
        struct scsi_qla_host *vha;
@@ -897,6 +924,10 @@ struct qla_tgt_sess {
 
        uint8_t port_name[WWN_SIZE];
        struct work_struct free_work;
+
+       union {
+               struct imm_ntfy_from_isp tm_iocb;
+       };
 };
 
 struct qla_tgt_cmd {
@@ -912,7 +943,6 @@ struct qla_tgt_cmd {
        unsigned int conf_compl_supported:1;
        unsigned int sg_mapped:1;
        unsigned int free_sg:1;
-       unsigned int aborted:1; /* Needed in case of SRR */
        unsigned int write_data_transferred:1;
        unsigned int ctx_dsd_alloced:1;
        unsigned int q_full:1;
@@ -961,6 +991,9 @@ struct qla_tgt_cmd {
         * BIT_14 - Back end data received/sent.
         * BIT_15 - SRR prepare ctio
         * BIT_16 - complete free
+        * BIT_17 - flush - qlt_abort_cmd_on_host_reset
+        * BIT_18 - completion w/abort status
+        * BIT_19 - completion w/unknown status
         */
        uint32_t cmd_flags;
 };
@@ -1026,6 +1059,10 @@ struct qla_tgt_srr_ctio {
        struct qla_tgt_cmd *cmd;
 };
 
+/* Check for Switch reserved address */
+#define IS_SW_RESV_ADDR(_s_id) \
+       ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc))
+
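
IS_SW_RESV_ADDR() matches the switch-reserved well-known address block (domain 0xFF, area 0xFC), which identifies fabric services rather than real initiator ports. A hedged usage sketch, reusing the port_id_t layout implied by the macro:

/* Sketch: ignore fabric service addresses when creating sessions. */
if (IS_SW_RESV_ADDR(port_id))
        return;         /* 0xFFFCxx is a fabric service, not an initiator */
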
 #define QLA_TGT_XMIT_DATA              1
 #define QLA_TGT_XMIT_STATUS            2
 #define QLA_TGT_XMIT_ALL               (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
@@ -1043,7 +1080,7 @@ extern int qlt_lport_register(void *, u64, u64, u64,
 extern void qlt_lport_deregister(struct scsi_qla_host *);
 extern void qlt_unreg_sess(struct qla_tgt_sess *);
 extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
-extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *);
+extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
 extern int __init qlt_init(void);
 extern void qlt_exit(void);
 extern void qlt_update_vp_map(struct scsi_qla_host *, int);
@@ -1073,12 +1110,23 @@ static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
                ha->host->active_mode |= MODE_INITIATOR;
 }
 
+static inline uint32_t sid_to_key(const uint8_t *s_id)
+{
+       uint32_t key;
+
+       key = (((unsigned long)s_id[0] << 16) |
+              ((unsigned long)s_id[1] << 8) |
+              (unsigned long)s_id[2]);
+       return key;
+}
+
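
sid_to_key() centralizes the 24-bit S_ID packing that tcm_qla2xxx previously open-coded at each btree lookup (see the two hunks further below that switch to it). A standalone check of the packing, assuming the usual domain/area/al_pa byte order:

#include <stdint.h>
#include <stdio.h>

static uint32_t sid_to_key(const uint8_t *s_id)
{
        return ((uint32_t)s_id[0] << 16) |
               ((uint32_t)s_id[1] << 8) |
                (uint32_t)s_id[2];
}

int main(void)
{
        const uint8_t s_id[3] = { 0x01, 0x02, 0x03 }; /* domain, area, al_pa */

        printf("key = 0x%06x\n", (unsigned)sid_to_key(s_id)); /* 0x010203 */
        return 0;
}
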
 /*
  * Exported symbols from qla_target.c LLD logic used by qla2xxx code.
  */
 extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
 extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
 extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
+extern void qlt_abort_cmd(struct qla_tgt_cmd *);
 extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
 extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
 extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
@@ -1109,5 +1157,7 @@ extern void qlt_stop_phase2(struct qla_tgt *);
 extern irqreturn_t qla83xx_msix_atio_q(int, void *);
 extern void qlt_83xx_iospace_config(struct qla_hw_data *);
 extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
+extern void qlt_logo_completion_handler(fc_port_t *, int);
+extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
 
 #endif /* __QLA_TARGET_H */
index d9a8c6084346759778c5cd8506c42d4ead19e2cc..9224a06646e6af420139ae29cba4802cf2663637 100644 (file)
@@ -374,7 +374,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
 {
        struct qla_tgt_cmd *cmd = container_of(se_cmd,
                                struct qla_tgt_cmd, se_cmd);
-
+       cmd->cmd_flags |= BIT_3;
        cmd->bufflen = se_cmd->data_length;
        cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
 
@@ -405,7 +405,7 @@ static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
            se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
                spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
                wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
-                                               3000);
+                                           3 * HZ);
                return 0;
        }
        spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
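
The change from 3000 to 3 * HZ matters because wait_for_completion_timeout() takes its timeout in jiffies, not milliseconds: the literal 3000 meant three seconds only on a HZ=1000 kernel, and thirty seconds at HZ=100. A minimal sketch of the correct form, assuming `done` is an initialized struct completion:

/* Sketch: express completion timeouts in jiffies, not milliseconds. */
unsigned long timeout = msecs_to_jiffies(3000);  /* equivalent to 3 * HZ */

if (!wait_for_completion_timeout(&done, timeout))
        pr_warn("timed out waiting for completion\n");
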
@@ -541,12 +541,10 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
        cmd->cmd_flags |= BIT_4;
        cmd->bufflen = se_cmd->data_length;
        cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
-       cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
 
        cmd->sg_cnt = se_cmd->t_data_nents;
        cmd->sg = se_cmd->t_data_sg;
        cmd->offset = 0;
-       cmd->cmd_flags |= BIT_3;
 
        cmd->prot_sg_cnt = se_cmd->t_prot_nents;
        cmd->prot_sg = se_cmd->t_prot_sg;
@@ -571,7 +569,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
        cmd->sg_cnt = 0;
        cmd->offset = 0;
        cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
-       cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
        if (cmd->cmd_flags &  BIT_5) {
                pr_crit("Bit_5 already set for cmd = %p.\n", cmd);
                dump_stack();
@@ -636,14 +633,7 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
 {
        struct qla_tgt_cmd *cmd = container_of(se_cmd,
                                struct qla_tgt_cmd, se_cmd);
-       struct scsi_qla_host *vha = cmd->vha;
-       struct qla_hw_data *ha = vha->hw;
-
-       if (!cmd->sg_mapped)
-               return;
-
-       pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
-       cmd->sg_mapped = 0;
+       qlt_abort_cmd(cmd);
 }
 
 static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
@@ -1149,9 +1139,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
                return NULL;
        }
 
-       key = (((unsigned long)s_id[0] << 16) |
-              ((unsigned long)s_id[1] << 8) |
-              (unsigned long)s_id[2]);
+       key = sid_to_key(s_id);
        pr_debug("find_sess_by_s_id: 0x%06x\n", key);
 
        se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
@@ -1186,9 +1174,7 @@ static void tcm_qla2xxx_set_sess_by_s_id(
        void *slot;
        int rc;
 
-       key = (((unsigned long)s_id[0] << 16) |
-              ((unsigned long)s_id[1] << 8) |
-              (unsigned long)s_id[2]);
+       key = sid_to_key(s_id);
        pr_debug("set_sess_by_s_id: %06x\n", key);
 
        slot = btree_lookup32(&lport->lport_fcport_map, key);
@@ -1544,6 +1530,10 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
        }
 
        sess->conf_compl_supported = conf_compl_supported;
+
+       /* Reset logout parameters to default */
+       sess->logout_on_delete = 1;
+       sess->keep_nport_handle = 0;
 }
 
 /*
index 1ac38e73df7eec896cf2835fd8588a1db5278a46..9ad41168d26df1897814121766cd2e8fbd3a0243 100644 (file)
@@ -859,7 +859,7 @@ sdev_store_queue_depth(struct device *dev, struct device_attribute *attr,
 
        depth = simple_strtoul(buf, NULL, 0);
 
-       if (depth < 1 || depth > sht->can_queue)
+       if (depth < 1 || depth > sdev->host->can_queue)
                return -EINVAL;
 
        retval = sht->change_queue_depth(sdev, depth);
index a85292b1d09d090261f89f2f6eed018f6277abb9..e3cd3ece44121c7d8ee3806a40a42acf069f6958 100644 (file)
@@ -203,7 +203,7 @@ static ssize_t srp_show_tmo(char *buf, int tmo)
        return tmo >= 0 ? sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n");
 }
 
-static int srp_parse_tmo(int *tmo, const char *buf)
+int srp_parse_tmo(int *tmo, const char *buf)
 {
        int res = 0;
 
@@ -214,6 +214,7 @@ static int srp_parse_tmo(int *tmo, const char *buf)
 
        return res;
 }
+EXPORT_SYMBOL(srp_parse_tmo);
 
 static ssize_t show_reconnect_delay(struct device *dev,
                                    struct device_attribute *attr, char *buf)
index 3f25b8fa921d69bff68ff6aab6e4c0b1f6ac65d7..871f3553987d6956c34c250928081feb10e3b9a0 100644 (file)
@@ -1329,9 +1329,9 @@ static int st_open(struct inode *inode, struct file *filp)
        spin_lock(&st_use_lock);
        STp->in_use = 0;
        spin_unlock(&st_use_lock);
-       scsi_tape_put(STp);
        if (resumed)
                scsi_autopm_put_device(STp->device);
+       scsi_tape_put(STp);
        return retval;
 
 }
index 285f77544c36391a89b243001c3fc93d0703f46f..7dbbb29d24c6cf5290cd06a7a97df570a8860f18 100644 (file)
@@ -949,7 +949,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
 {
        struct Scsi_Host *shost;
        struct virtio_scsi *vscsi;
-       int err, host_prot;
+       int err;
        u32 sg_elems, num_targets;
        u32 cmd_per_lun;
        u32 num_queues;
@@ -1009,6 +1009,8 @@ static int virtscsi_probe(struct virtio_device *vdev)
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
        if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
+               int host_prot;
+
                host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
                            SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
                            SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
index 0cae1694014dfbbaba171187fdbd4fc835dc1546..b0f30fb68914220ea75b401dd52b7d537c00687e 100644 (file)
@@ -612,7 +612,7 @@ config SPI_XTENSA_XTFPGA
 
 config SPI_ZYNQMP_GQSPI
        tristate "Xilinx ZynqMP GQSPI controller"
-       depends on SPI_MASTER
+       depends on SPI_MASTER && HAS_DMA
        help
          Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
 
index 788e2b176a4f7707051bcc325538e7d2a6d599f4..acce90ac7371d58d94b47dc3cb1da029969fb95f 100644 (file)
@@ -40,6 +40,7 @@
 #define SPFI_CONTROL_SOFT_RESET                        BIT(11)
 #define SPFI_CONTROL_SEND_DMA                  BIT(10)
 #define SPFI_CONTROL_GET_DMA                   BIT(9)
+#define SPFI_CONTROL_SE                        BIT(8)
 #define SPFI_CONTROL_TMODE_SHIFT               5
 #define SPFI_CONTROL_TMODE_MASK                        0x7
 #define SPFI_CONTROL_TMODE_SINGLE              0
@@ -491,6 +492,7 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
        else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
                 xfer->rx_nbits == SPI_NBITS_QUAD)
                val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
+       val |= SPFI_CONTROL_SE;
        spfi_writel(spfi, val, SPFI_CONTROL);
 }
 
index eb7d3a6fb14c0694b55e5286933256c5962043ce..f9deb84e4e551bdd9c42c8ac204c80c18f0f8579 100644 (file)
@@ -201,8 +201,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
 {
        struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
 
-       if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml)
-           && (transfer->len > spi_imx->tx_wml))
+       if (spi_imx->dma_is_inited
+           && transfer->len > spi_imx->rx_wml * sizeof(u32)
+           && transfer->len > spi_imx->tx_wml * sizeof(u32))
                return true;
        return false;
 }
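
The corrected test accounts for a units mismatch: the rx_wml/tx_wml watermarks count 32-bit FIFO words while transfer->len counts bytes, so the old comparison enabled DMA for transfers a quarter of the intended size. A standalone sketch of the fixed threshold (field names taken from the hunk):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch: watermarks are in u32 words, transfer lengths in bytes. */
static bool can_dma(size_t len_bytes, unsigned int rx_wml, unsigned int tx_wml)
{
        return len_bytes > rx_wml * sizeof(uint32_t) &&
               len_bytes > tx_wml * sizeof(uint32_t);
}
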
index 87b20a511a6ba66cc4ba084df028a3c073691aa2..f23f36ebaf3dc700447da2e4da6e9b1b28e8ed82 100644 (file)
@@ -214,6 +214,7 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
        case GQSPI_SELECT_FLASH_CS_BOTH:
                instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER |
                        GQSPI_GENFIFO_CS_UPPER;
+               break;
        case GQSPI_SELECT_FLASH_CS_UPPER:
                instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER;
                break;
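
The added break fixes an unintended switch fallthrough: selecting both chip selects fell straight into the CS_UPPER case, which overwrote genfifocs and silently dropped the lower chip select. A minimal sketch of the bug class (names hypothetical):

/* Sketch: without the break, the next case clobbers the assignment. */
switch (select) {
case CS_BOTH:
        val = CS_LOWER | CS_UPPER;
        break;          /* previously missing: fell through to CS_UPPER */
case CS_UPPER:
        val = CS_UPPER;
        break;
}
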
index dd616ff0ffc52542c3c8f8aaaf7c7d30bb9fd219..c7de64171c452325ab43884b501977a33f724b93 100644 (file)
@@ -693,6 +693,7 @@ static struct class *spidev_class;
 #ifdef CONFIG_OF
 static const struct of_device_id spidev_dt_ids[] = {
        { .compatible = "rohm,dh2228fv" },
+       { .compatible = "lineartechnology,ltc2488" },
        {},
 };
 MODULE_DEVICE_TABLE(of, spidev_dt_ids);
index b8ee81840666ad35b33d2cf2182b50db83415810..3f287c48e08204b353c27ea132f0e121956eb19c 100644 (file)
@@ -1,6 +1,6 @@
 config STAGING_BOARD
        bool "Staging Board Support"
-       depends on OF_ADDRESS
+       depends on OF_ADDRESS && OF_IRQ && CLKDEV_LOOKUP
        help
          Select to enable per-board staging support code.
 
index 7125eb955ae50556f6fbd6c011aa1b8671d5f659..8a9d4a0de1298777d522a368a1ca6177cfdee06b 100644 (file)
@@ -31,7 +31,6 @@
 #define DEBUG_PORTAL_ALLOC
 #define DEBUG_SUBSYSTEM S_LND
 
-#include <asm/irq.h>
 #include <linux/crc32.h>
 #include <linux/errno.h>
 #include <linux/if.h>
index ed040fbb7df8d319ba39b61acf2009a163bf3745..b0c8e235b982164bb170b53655ccfc8d01378d6b 100644 (file)
@@ -1418,7 +1418,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
 
        priv->current_aid = conf->aid;
 
-       if (changed & BSS_CHANGED_BSSID) {
+       if (changed & BSS_CHANGED_BSSID && conf->bssid) {
                unsigned long flags;
 
                spin_lock_irqsave(&priv->lock, flags);
index f97323f19acfd8c988a2fbefb8b7f260ef7ece34..af572d71813531fa89453f5692ef834cb0bd856b 100644 (file)
@@ -701,7 +701,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
 
        priv->current_aid = conf->aid;
 
-       if (changed & BSS_CHANGED_BSSID)
+       if (changed & BSS_CHANGED_BSSID && conf->bssid)
                vnt_mac_set_bssid_addr(priv, (u8 *)conf->bssid);
 
 
index 4e68b62193ed7781d3c32f4160878809acfc546d..cd77a064c772f1bbe897482d2372fe5bc6434328 100644 (file)
@@ -3998,7 +3998,13 @@ get_immediate:
        }
 
 transport_err:
-       iscsit_take_action_for_connection_exit(conn);
+       /*
+        * Avoid the normal connection failure code-path if this connection
+        * is still within LOGIN mode, and iscsi_np process context is
+        * responsible for cleaning up the early connection failure.
+        */
+       if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
+               iscsit_take_action_for_connection_exit(conn);
 out:
        return 0;
 }
@@ -4082,7 +4088,7 @@ reject:
 
 int iscsi_target_rx_thread(void *arg)
 {
-       int ret;
+       int ret, rc;
        u8 buffer[ISCSI_HDR_LEN], opcode;
        u32 checksum = 0, digest = 0;
        struct iscsi_conn *conn = arg;
@@ -4092,10 +4098,16 @@ int iscsi_target_rx_thread(void *arg)
         * connection recovery / failure event can be triggered externally.
         */
        allow_signal(SIGINT);
+       /*
+        * Wait for iscsi_post_login_handler() to complete before allowing
+        * incoming iscsi/tcp socket I/O, and/or failing the connection.
+        */
+       rc = wait_for_completion_interruptible(&conn->rx_login_comp);
+       if (rc < 0)
+               return 0;
 
        if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
                struct completion comp;
-               int rc;
 
                init_completion(&comp);
                rc = wait_for_completion_interruptible(&comp);
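
The new rx_login_comp wait turns RX-thread start-up into a two-phase handshake: the thread can now be created early (so resource failures surface before the final login response, as the login-transmit hunk further below arranges), but it parks on the completion until iscsi_post_login_handler() signals that the connection is fully logged in. A kernel-style sketch of the gating pattern (struct and field names hypothetical):

/* Sketch: park a worker thread on a completion until setup finishes. */
static int rx_thread(void *arg)
{
        struct conn *c = arg;

        /* Block until login completes; bail out if interrupted. */
        if (wait_for_completion_interruptible(&c->rx_login_comp) < 0)
                return 0;

        /* ... normal RX processing starts only now ... */
        return 0;
}

/* Login path, once the connection reaches the logged-in state: */
complete(&c->rx_login_comp);
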
@@ -4532,7 +4544,18 @@ static void iscsit_logout_post_handler_closesession(
        struct iscsi_conn *conn)
 {
        struct iscsi_session *sess = conn->sess;
-       int sleep = cmpxchg(&conn->tx_thread_active, true, false);
+       int sleep = 1;
+       /*
+        * Traditional iscsi/tcp will invoke this logic from TX thread
+        * context during session logout, so clear tx_thread_active and
+        * sleep if iscsit_close_connection() has not already occurred.
+        *
+        * Since iser-target invokes this logic from its own workqueue,
+        * always sleep waiting for RX/TX thread shutdown to complete
+        * within iscsit_close_connection().
+        */
+       if (conn->conn_transport->transport_type == ISCSI_TCP)
+               sleep = cmpxchg(&conn->tx_thread_active, true, false);
 
        atomic_set(&conn->conn_logout_remove, 0);
        complete(&conn->conn_logout_comp);
@@ -4546,7 +4569,10 @@ static void iscsit_logout_post_handler_closesession(
 static void iscsit_logout_post_handler_samecid(
        struct iscsi_conn *conn)
 {
-       int sleep = cmpxchg(&conn->tx_thread_active, true, false);
+       int sleep = 1;
+
+       if (conn->conn_transport->transport_type == ISCSI_TCP)
+               sleep = cmpxchg(&conn->tx_thread_active, true, false);
 
        atomic_set(&conn->conn_logout_remove, 0);
        complete(&conn->conn_logout_comp);
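
The cmpxchg() is kept only for plain iscsi/tcp, where this handler runs inside the TX thread itself: exchanging tx_thread_active from true to false both claims the shutdown and tells the caller whether to sleep (it must not wait on its own thread if close already ran). iser-target calls in from its own workqueue and must always sleep. A standalone sketch of the claim-or-wait logic using C11 atomics (names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool tx_thread_active = true;

/* Returns true if the caller should sleep for thread shutdown. */
static bool claim_tx_shutdown(bool runs_in_tx_thread)
{
        bool expected = true;

        if (!runs_in_tx_thread)
                return true;    /* external context: always wait */

        /* True only if we flipped the flag; false if shutdown already ran. */
        return atomic_compare_exchange_strong(&tx_thread_active,
                                              &expected, false);
}
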
@@ -4765,6 +4791,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
        struct iscsi_session *sess;
        struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
        struct se_session *se_sess, *se_sess_tmp;
+       LIST_HEAD(free_list);
        int session_count = 0;
 
        spin_lock_bh(&se_tpg->session_lock);
@@ -4786,14 +4813,17 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
                }
                atomic_set(&sess->session_reinstatement, 1);
                spin_unlock(&sess->conn_lock);
-               spin_unlock_bh(&se_tpg->session_lock);
 
-               iscsit_free_session(sess);
-               spin_lock_bh(&se_tpg->session_lock);
+               list_move_tail(&se_sess->sess_list, &free_list);
+       }
+       spin_unlock_bh(&se_tpg->session_lock);
+
+       list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
+               sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
 
+               iscsit_free_session(sess);
                session_count++;
        }
-       spin_unlock_bh(&se_tpg->session_lock);
 
        pr_debug("Released %d iSCSI Session(s) from Target Portal"
                        " Group: %hu\n", session_count, tpg->tpgt);
index 3d0fe4ff55904d00a702958a82413a33873de888..7e8f65e5448fdbda5645e3d5a836ad9f81408efa 100644 (file)
@@ -82,6 +82,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
        init_completion(&conn->conn_logout_comp);
        init_completion(&conn->rx_half_close_comp);
        init_completion(&conn->tx_half_close_comp);
+       init_completion(&conn->rx_login_comp);
        spin_lock_init(&conn->cmd_lock);
        spin_lock_init(&conn->conn_usage_lock);
        spin_lock_init(&conn->immed_queue_lock);
@@ -644,7 +645,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
                iscsit_start_nopin_timer(conn);
 }
 
-static int iscsit_start_kthreads(struct iscsi_conn *conn)
+int iscsit_start_kthreads(struct iscsi_conn *conn)
 {
        int ret = 0;
 
@@ -679,6 +680,7 @@ static int iscsit_start_kthreads(struct iscsi_conn *conn)
 
        return 0;
 out_tx:
+       send_sig(SIGINT, conn->tx_thread, 1);
        kthread_stop(conn->tx_thread);
        conn->tx_thread_active = false;
 out_bitmap:
@@ -689,7 +691,7 @@ out_bitmap:
        return ret;
 }
 
-int iscsi_post_login_handler(
+void iscsi_post_login_handler(
        struct iscsi_np *np,
        struct iscsi_conn *conn,
        u8 zero_tsih)
@@ -699,7 +701,6 @@ int iscsi_post_login_handler(
        struct se_session *se_sess = sess->se_sess;
        struct iscsi_portal_group *tpg = sess->tpg;
        struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
-       int rc;
 
        iscsit_inc_conn_usage_count(conn);
 
@@ -739,10 +740,6 @@ int iscsi_post_login_handler(
                        sess->sess_ops->InitiatorName);
                spin_unlock_bh(&sess->conn_lock);
 
-               rc = iscsit_start_kthreads(conn);
-               if (rc)
-                       return rc;
-
                iscsi_post_login_start_timers(conn);
                /*
                 * Determine CPU mask to ensure connection's RX and TX kthreads
@@ -751,15 +748,20 @@ int iscsi_post_login_handler(
                iscsit_thread_get_cpumask(conn);
                conn->conn_rx_reset_cpumask = 1;
                conn->conn_tx_reset_cpumask = 1;
-
+               /*
+                * Wake up the sleeping iscsi_target_rx_thread() now that
+                * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
+                */
+               complete(&conn->rx_login_comp);
                iscsit_dec_conn_usage_count(conn);
+
                if (stop_timer) {
                        spin_lock_bh(&se_tpg->session_lock);
                        iscsit_stop_time2retain_timer(sess);
                        spin_unlock_bh(&se_tpg->session_lock);
                }
                iscsit_dec_session_usage_count(sess);
-               return 0;
+               return;
        }
 
        iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
@@ -800,10 +802,6 @@ int iscsi_post_login_handler(
                " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
        spin_unlock_bh(&se_tpg->session_lock);
 
-       rc = iscsit_start_kthreads(conn);
-       if (rc)
-               return rc;
-
        iscsi_post_login_start_timers(conn);
        /*
         * Determine CPU mask to ensure connection's RX and TX kthreads
@@ -812,10 +810,12 @@ int iscsi_post_login_handler(
        iscsit_thread_get_cpumask(conn);
        conn->conn_rx_reset_cpumask = 1;
        conn->conn_tx_reset_cpumask = 1;
-
+       /*
+        * Wake up the sleeping iscsi_target_rx_thread() now that
+        * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
+        */
+       complete(&conn->rx_login_comp);
        iscsit_dec_conn_usage_count(conn);
-
-       return 0;
 }
 
 static void iscsi_handle_login_thread_timeout(unsigned long data)
@@ -1380,23 +1380,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
        if (ret < 0)
                goto new_sess_out;
 
-       if (!conn->sess) {
-               pr_err("struct iscsi_conn session pointer is NULL!\n");
-               goto new_sess_out;
-       }
-
        iscsi_stop_login_thread_timer(np);
 
-       if (signal_pending(current))
-               goto new_sess_out;
-
        if (ret == 1) {
                tpg_np = conn->tpg_np;
 
-               ret = iscsi_post_login_handler(np, conn, zero_tsih);
-               if (ret < 0)
-                       goto new_sess_out;
-
+               iscsi_post_login_handler(np, conn, zero_tsih);
                iscsit_deaccess_np(np, tpg, tpg_np);
        }
 
index 1c7358081533ad1e3fb0533f424fa7749feda7d5..57aa0d0fd820f330c271836ecdc02c5a067179b2 100644 (file)
@@ -12,7 +12,8 @@ extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
 extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
 extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
 extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
-extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
+extern int iscsit_start_kthreads(struct iscsi_conn *);
+extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
 extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
                                bool, bool);
 extern int iscsi_target_login_thread(void *);
index 8c02fa34716fae5a40dbf8cb09357bba5df2e7bd..f9cde91418367071d08c3a3ebe08dc44a1a1abe3 100644 (file)
@@ -17,6 +17,7 @@
  ******************************************************************************/
 
 #include <linux/ctype.h>
+#include <linux/kthread.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
@@ -361,10 +362,24 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
                ntohl(login_rsp->statsn), login->rsp_length);
 
        padding = ((-login->rsp_length) & 3);
+       /*
+        * Before sending the last login response containing the transition
+        * bit for full-feature-phase, go ahead and start up TX/RX threads
+        * now to avoid potential resource allocation failures after the
+        * final login response has been sent.
+        */
+       if (login->login_complete) {
+               int rc = iscsit_start_kthreads(conn);
+               if (rc) {
+                       iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+                                           ISCSI_LOGIN_STATUS_NO_RESOURCES);
+                       return -1;
+               }
+       }
 
        if (conn->conn_transport->iscsit_put_login_tx(conn, login,
                                        login->rsp_length + padding) < 0)
-               return -1;
+               goto err;
 
        login->rsp_length               = 0;
        mutex_lock(&sess->cmdsn_mutex);
@@ -373,6 +388,23 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
        mutex_unlock(&sess->cmdsn_mutex);
 
        return 0;
+
+err:
+       if (login->login_complete) {
+               if (conn->rx_thread && conn->rx_thread_active) {
+                       send_sig(SIGINT, conn->rx_thread, 1);
+                       kthread_stop(conn->rx_thread);
+               }
+               if (conn->tx_thread && conn->tx_thread_active) {
+                       send_sig(SIGINT, conn->tx_thread, 1);
+                       kthread_stop(conn->tx_thread);
+               }
+               spin_lock(&iscsit_global->ts_bitmap_lock);
+               bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
+                                     get_order(1));
+               spin_unlock(&iscsit_global->ts_bitmap_lock);
+       }
+       return -1;
 }
 
 static void iscsi_target_sk_data_ready(struct sock *sk)
index 0b0de36474784987c781906243eabbed27ecab00..c2e9fea90b4a4bc16a0384d79fa9684c9f4176e0 100644 (file)
@@ -747,7 +747,7 @@ static ssize_t store_pi_prot_type(struct se_dev_attrib *da,
        if (!dev->transport->init_prot || !dev->transport->free_prot) {
                /* 0 is only allowed value for non-supporting backends */
                if (flag == 0)
-                       return 0;
+                       return count;
 
                pr_err("DIF protection not supported by backend: %s\n",
                       dev->transport->name);
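
Returning count instead of 0 matters for any sysfs/configfs store: a callback that returns 0 tells the VFS no bytes were consumed, and userspace write loops will retry the same buffer forever. A hedged sketch of the convention (modern configfs signature, names hypothetical):

/* Sketch: an accepted-but-ignored store must still consume the buffer. */
static ssize_t foo_store(struct config_item *item, const char *page,
                         size_t count)
{
        if (!feature_supported(item))
                return count;   /* accept and ignore; never return 0 */

        /* ... parse and apply 'page' ... */
        return count;
}
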
@@ -1590,9 +1590,9 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
        u8 type = 0;
 
        if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
-               return 0;
+               return count;
        if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
-               return 0;
+               return count;
 
        if (dev->export_count) {
                pr_debug("Unable to process APTPL metadata while"
@@ -1658,22 +1658,32 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
                 * PR APTPL Metadata for Reservation
                 */
                case Opt_res_holder:
-                       match_int(args, &arg);
+                       ret = match_int(args, &arg);
+                       if (ret)
+                               goto out;
                        res_holder = arg;
                        break;
                case Opt_res_type:
-                       match_int(args, &arg);
+                       ret = match_int(args, &arg);
+                       if (ret)
+                               goto out;
                        type = (u8)arg;
                        break;
                case Opt_res_scope:
-                       match_int(args, &arg);
+                       ret = match_int(args, &arg);
+                       if (ret)
+                               goto out;
                        break;
                case Opt_res_all_tg_pt:
-                       match_int(args, &arg);
+                       ret = match_int(args, &arg);
+                       if (ret)
+                               goto out;
                        all_tg_pt = (int)arg;
                        break;
                case Opt_mapped_lun:
-                       match_int(args, &arg);
+                       ret = match_int(args, &arg);
+                       if (ret)
+                               goto out;
                        mapped_lun = (u64)arg;
                        break;
                /*
@@ -1701,14 +1711,20 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
                        }
                        break;
                case Opt_tpgt:
-                       match_int(args, &arg);
+                       ret = match_int(args, &arg);
+                       if (ret)
+                               goto out;
                        tpgt = (u16)arg;
                        break;
                case Opt_port_rtpi:
-                       match_int(args, &arg);
+                       ret = match_int(args, &arg);
+                       if (ret)
+                               goto out;
                        break;
                case Opt_target_lun:
-                       match_int(args, &arg);
+                       ret = match_int(args, &arg);
+                       if (ret)
+                               goto out;
                        target_lun = (u64)arg;
                        break;
                default:
@@ -1985,7 +2001,7 @@ static ssize_t target_core_store_alua_lu_gp(
 
        lu_gp_mem = dev->dev_alua_lu_gp_mem;
        if (!lu_gp_mem)
-               return 0;
+               return count;
 
        if (count > LU_GROUP_NAME_BUF) {
                pr_err("ALUA LU Group Alias too large!\n");
index 0fdbe43b7dad99479f7288584a0d95815c4dab72..5ab7100de17eb5694b162403ef43585c4bbceaf1 100644 (file)
@@ -1474,7 +1474,7 @@ core_scsi3_decode_spec_i_port(
        LIST_HEAD(tid_dest_list);
        struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
        unsigned char *buf, *ptr, proto_ident;
-       const unsigned char *i_str;
+       const unsigned char *i_str = NULL;
        char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
        sense_reason_t ret;
        u32 tpdl, tid_len = 0;
index 4703f403f31c0dd6cc9b4d31422bbc16387b20e7..384cf88944113892135b3ff253ed97bdc6b9c8b4 100644 (file)
@@ -333,6 +333,7 @@ static int rd_configure_device(struct se_device *dev)
        dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
        dev->dev_attrib.hw_max_sectors = UINT_MAX;
        dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
+       dev->dev_attrib.is_nonrot = 1;
 
        rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
 
index b0744433315a80496a84d8d6f49e01300471f463..b5ba1ec3c35476361103d7dca47a1934cdd3289f 100644 (file)
@@ -454,10 +454,17 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
                    cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
                        buf[4] = 0x5;
                else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
-                       cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
+                        cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
                        buf[4] = 0x4;
        }
 
+       /* logical unit supports type 1 and type 3 protection */
+       if ((dev->transport->get_device_type(dev) == TYPE_DISK) &&
+           (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) &&
+           (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)) {
+               buf[4] |= (0x3 << 3);
+       }
+
        /* Set HEADSUP, ORDSUP, SIMPSUP */
        buf[5] = 0x07;
 
index c9c27f69e101cfc9246a81ed1f187148b08193e6..ee8bfacf20716481a23a60325b5c99ba9da35508 100644 (file)
@@ -1108,19 +1108,29 @@ static void eraser(unsigned char c, struct tty_struct *tty)
  *     Locking: ctrl_lock
  */
 
-static void isig(int sig, struct tty_struct *tty)
+static void __isig(int sig, struct tty_struct *tty)
 {
-       struct n_tty_data *ldata = tty->disc_data;
        struct pid *tty_pgrp = tty_get_pgrp(tty);
        if (tty_pgrp) {
                kill_pgrp(tty_pgrp, sig, 1);
                put_pid(tty_pgrp);
        }
+}
 
-       if (!L_NOFLSH(tty)) {
+static void isig(int sig, struct tty_struct *tty)
+{
+       struct n_tty_data *ldata = tty->disc_data;
+
+       if (L_NOFLSH(tty)) {
+               /* signal only */
+               __isig(sig, tty);
+
+       } else { /* signal and flush */
                up_read(&tty->termios_rwsem);
                down_write(&tty->termios_rwsem);
 
+               __isig(sig, tty);
+
                /* clear echo buffer */
                mutex_lock(&ldata->output_lock);
                ldata->echo_head = ldata->echo_tail = 0;
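
Splitting __isig() out lets the NOFLSH case signal without touching the buffers, while the flush case must first upgrade its hold on termios_rwsem. The up_read()/down_write() pair is the only way to go from shared to exclusive on an rwsem, and it is not atomic. A three-line sketch of the caveat:

/* Sketch: rwsems cannot be upgraded in place. */
up_read(&tty->termios_rwsem);    /* drop the shared hold */
down_write(&tty->termios_rwsem); /* may sleep; state can change in between */
/* re-validate anything derived under the read lock before continuing */
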
index 76e65b714471d9eeb11ea12593adea987205daaa..15b4079a335e886ca62c422970c80ba8c93f9dfb 100644 (file)
@@ -1185,7 +1185,7 @@ config SERIAL_SC16IS7XX_CORE
 config SERIAL_SC16IS7XX
         tristate "SC16IS7xx serial support"
         select SERIAL_CORE
-        depends on I2C || SPI_MASTER
+        depends on (SPI_MASTER && !I2C) || I2C
         help
           This selects support for SC16IS7xx serial ports.
           Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752,
index 50cf5b10ceed98022cbc44609acd280ef2beba81..fd27e986b1dd3437dfd2560ec11efd8def7bf254 100644 (file)
@@ -2310,8 +2310,8 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
        void __iomem *base;
 
        base = devm_ioremap_resource(dev, mmiobase);
-       if (!base)
-               return -ENOMEM;
+       if (IS_ERR(base))
+               return PTR_ERR(base);
 
        index = pl011_probe_dt_alias(index, dev);
 
index a57301a6fe427027788aa2153547928597c71ed1..679709f51fd4cfe4739fc2edb433cc4badaf0227 100644 (file)
@@ -950,7 +950,7 @@ static int etraxfs_uart_remove(struct platform_device *pdev)
 
        port = platform_get_drvdata(pdev);
        uart_remove_one_port(&etraxfs_uart_driver, port);
-       etraxfs_uart_ports[pdev->id] = NULL;
+       etraxfs_uart_ports[port->line] = NULL;
 
        return 0;
 }
index 2c90dc31bfaabc0242e168dd688faadadd9f164a..54fdc7866ea17423836827ee374c1ecfd0b5257d 100644 (file)
@@ -1121,11 +1121,6 @@ static int imx_startup(struct uart_port *port)
 
        writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
 
-       /* Can we enable the DMA support? */
-       if (is_imx6q_uart(sport) && !uart_console(port) &&
-           !sport->dma_is_inited)
-               imx_uart_dma_init(sport);
-
        spin_lock_irqsave(&sport->port.lock, flags);
        /* Reset fifo's and state machines */
        i = 100;
@@ -1143,9 +1138,6 @@ static int imx_startup(struct uart_port *port)
        writel(USR1_RTSD, sport->port.membase + USR1);
        writel(USR2_ORE, sport->port.membase + USR2);
 
-       if (sport->dma_is_inited && !sport->dma_is_enabled)
-               imx_enable_dma(sport);
-
        temp = readl(sport->port.membase + UCR1);
        temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
 
@@ -1316,6 +1308,11 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
                        } else {
                                ucr2 |= UCR2_CTSC;
                        }
+
+                       /* Can we enable the DMA support? */
+                       if (is_imx6q_uart(sport) && !uart_console(port)
+                               && !sport->dma_is_inited)
+                               imx_uart_dma_init(sport);
                } else {
                        termios->c_cflag &= ~CRTSCTS;
                }
@@ -1432,6 +1429,8 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
        if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
                imx_enable_ms(&sport->port);
 
+       if (sport->dma_is_inited && !sport->dma_is_enabled)
+               imx_enable_dma(sport);
        spin_unlock_irqrestore(&sport->port.lock, flags);
 }
 
index 9e6576004a427e51cac6f1b59237248a706332f8..5ccc698cbbfa1ad9bde91200ea5907753fbb4b78 100644 (file)
@@ -354,6 +354,26 @@ static void sc16is7xx_port_write(struct uart_port *port, u8 reg, u8 val)
                     (reg << SC16IS7XX_REG_SHIFT) | port->line, val);
 }
 
+static void sc16is7xx_fifo_read(struct uart_port *port, unsigned int rxlen)
+{
+       struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+       u8 addr = (SC16IS7XX_RHR_REG << SC16IS7XX_REG_SHIFT) | port->line;
+
+       regcache_cache_bypass(s->regmap, true);
+       regmap_raw_read(s->regmap, addr, s->buf, rxlen);
+       regcache_cache_bypass(s->regmap, false);
+}
+
+static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send)
+{
+       struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+       u8 addr = (SC16IS7XX_THR_REG << SC16IS7XX_REG_SHIFT) | port->line;
+
+       regcache_cache_bypass(s->regmap, true);
+       regmap_raw_write(s->regmap, addr, s->buf, to_send);
+       regcache_cache_bypass(s->regmap, false);
+}
+
 static void sc16is7xx_port_update(struct uart_port *port, u8 reg,
                                  u8 mask, u8 val)
 {
@@ -508,10 +528,7 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
                        s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG);
                        bytes_read = 1;
                } else {
-                       regcache_cache_bypass(s->regmap, true);
-                       regmap_raw_read(s->regmap, SC16IS7XX_RHR_REG,
-                                       s->buf, rxlen);
-                       regcache_cache_bypass(s->regmap, false);
+                       sc16is7xx_fifo_read(port, rxlen);
                        bytes_read = rxlen;
                }
 
@@ -591,9 +608,8 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
                        s->buf[i] = xmit->buf[xmit->tail];
                        xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
                }
-               regcache_cache_bypass(s->regmap, true);
-               regmap_raw_write(s->regmap, SC16IS7XX_THR_REG, s->buf, to_send);
-               regcache_cache_bypass(s->regmap, false);
+
+               sc16is7xx_fifo_write(port, to_send);
        }
 
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
index 7ae1592f7ec9731c4dab1d3a12b980420aade634..f36852067f20e61ebaf67269ec08c37ea342c06a 100644 (file)
@@ -1418,7 +1418,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
        mutex_lock(&port->mutex);
        uart_shutdown(tty, state);
        tty_port_tty_set(port, NULL);
-       tty->closing = 0;
+
        spin_lock_irqsave(&port->lock, flags);
 
        if (port->blocked_open) {
@@ -1444,6 +1444,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
        mutex_unlock(&port->mutex);
 
        tty_ldisc_flush(tty);
+       tty->closing = 0;
 }
 
 static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
index ea27804d87af9ec2485b1043f8ad97b30c05713f..381a2b13682c1a587a81e9bab781bcccdc669332 100644 (file)
@@ -356,6 +356,7 @@ int paste_selection(struct tty_struct *tty)
                        schedule();
                        continue;
                }
+               __set_current_state(TASK_RUNNING);
                count = sel_buffer_lth - pasted;
                count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL,
                                              count);
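
The added __set_current_state(TASK_RUNNING) closes a standard open-coded-wait bug: the loop marks itself TASK_INTERRUPTIBLE before testing the condition, and on the exit path it must restore TASK_RUNNING before doing anything that may block, or it triggers "do not call blocking ops when !TASK_RUNNING" warnings and can lose wakeups. The canonical loop shape:

/* Sketch: an open-coded wait loop must restore TASK_RUNNING on exit. */
for (;;) {
        set_current_state(TASK_INTERRUPTIBLE);
        if (condition)
                break;
        schedule();
}
__set_current_state(TASK_RUNNING);      /* before any blocking work */
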
index 8fe52989b380155926c6865721e9ba16e2a60685..4462d167900c515abbf205b2b6134016c0933390 100644 (file)
@@ -742,6 +742,8 @@ static void visual_init(struct vc_data *vc, int num, int init)
        __module_get(vc->vc_sw->owner);
        vc->vc_num = num;
        vc->vc_display_fg = &master_display_fg;
+       if (vc->vc_uni_pagedir_loc)
+               con_free_unimap(vc);
        vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir;
        vc->vc_uni_pagedir = NULL;
        vc->vc_hi_font_mask = 0;
index 519a77ba214cce4c4580b54856c74ba9e3af5342..b30e7423549b04b0e7d442a128048b4221c68062 100644 (file)
@@ -1944,6 +1944,7 @@ static void __exit acm_exit(void)
        usb_deregister(&acm_driver);
        tty_unregister_driver(acm_tty_driver);
        put_tty_driver(acm_tty_driver);
+       idr_destroy(&acm_minors);
 }
 
 module_init(acm_init);
index 0e6f968e93fe8a9f31f7de7199fa778b60b2f900..01c0c0477a9e93d1c8e1150b80763f5ee6c90a02 100644 (file)
@@ -242,7 +242,7 @@ static int __init ulpi_init(void)
 {
        return bus_register(&ulpi_bus);
 }
-module_init(ulpi_init);
+subsys_initcall(ulpi_init);
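
Moving ulpi_init() from module_init to subsys_initcall changes built-in initcall ordering: the ULPI bus is now registered during the subsys pass, before any module_init-level driver that wants to register ULPI devices can run. The general pattern:

/* Sketch: register a bus early so drivers can attach at module_init. */
static int __init my_bus_init(void)
{
        return bus_register(&my_bus_type);      /* hypothetical bus */
}
subsys_initcall(my_bus_init);   /* runs before module_init() callbacks */
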
 
 static void __exit ulpi_exit(void)
 {
index be5b2074f9066a8c9ca3e79289117f67d52b1063..cbcd0920fb5121ba44bd7c87de9a3b3fdd9f51d6 100644 (file)
@@ -1022,9 +1022,12 @@ static int register_root_hub(struct usb_hcd *hcd)
                                dev_name(&usb_dev->dev), retval);
                return (retval < 0) ? retval : -EMSGSIZE;
        }
-       if (usb_dev->speed == USB_SPEED_SUPER) {
+
+       if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) {
                retval = usb_get_bos_descriptor(usb_dev);
-               if (retval < 0) {
+               if (!retval) {
+                       usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
+               } else if (usb_dev->speed == USB_SPEED_SUPER) {
                        mutex_unlock(&usb_bus_list_lock);
                        dev_dbg(parent_dev, "can't read %s bos descriptor %d\n",
                                        dev_name(&usb_dev->dev), retval);
index 43cb2f2e3b4375aee6c362e8b08d5f2b71865d8f..73dfa194160b78fba6bec233b667b00bceac6cb1 100644 (file)
@@ -122,7 +122,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
        return usb_get_intfdata(hdev->actconfig->interface[0]);
 }
 
-static int usb_device_supports_lpm(struct usb_device *udev)
+int usb_device_supports_lpm(struct usb_device *udev)
 {
        /* USB 2.1 (and greater) devices indicate LPM support through
         * their USB 2.0 Extended Capabilities BOS descriptor.
index 7eb1e26798e5f293a3f2bc508dd268bbecdb63f1..457255a3306a3c2837674d22aa9560bf30842220 100644 (file)
@@ -65,6 +65,7 @@ extern int  usb_hub_init(void);
 extern void usb_hub_cleanup(void);
 extern int usb_major_init(void);
 extern void usb_major_cleanup(void);
+extern int usb_device_supports_lpm(struct usb_device *udev);
 
 #ifdef CONFIG_PM
 
index e5b546f1152ef0f8c527fb19d776372cb0877f69..c3cc1a78d1e2b3470402f7cae45e4f11aa02dc0a 100644 (file)
@@ -72,17 +72,7 @@ static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
        dev_dbg(hsotg->dev, "%s\n", __func__);
 
        /* Backup Host regs */
-       hr = hsotg->hr_backup;
-       if (!hr) {
-               hr = devm_kzalloc(hsotg->dev, sizeof(*hr), GFP_KERNEL);
-               if (!hr) {
-                       dev_err(hsotg->dev, "%s: can't allocate host regs\n",
-                                       __func__);
-                       return -ENOMEM;
-               }
-
-               hsotg->hr_backup = hr;
-       }
+       hr = &hsotg->hr_backup;
        hr->hcfg = readl(hsotg->regs + HCFG);
        hr->haintmsk = readl(hsotg->regs + HAINTMSK);
        for (i = 0; i < hsotg->core_params->host_channels; ++i)
@@ -90,6 +80,7 @@ static int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
 
        hr->hprt0 = readl(hsotg->regs + HPRT0);
        hr->hfir = readl(hsotg->regs + HFIR);
+       hr->valid = true;
 
        return 0;
 }
@@ -109,12 +100,13 @@ static int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
        dev_dbg(hsotg->dev, "%s\n", __func__);
 
        /* Restore host regs */
-       hr = hsotg->hr_backup;
-       if (!hr) {
+       hr = &hsotg->hr_backup;
+       if (!hr->valid) {
                dev_err(hsotg->dev, "%s: no host registers to restore\n",
                                __func__);
                return -EINVAL;
        }
+       hr->valid = false;
 
        writel(hr->hcfg, hsotg->regs + HCFG);
        writel(hr->haintmsk, hsotg->regs + HAINTMSK);
@@ -152,17 +144,7 @@ static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
        dev_dbg(hsotg->dev, "%s\n", __func__);
 
        /* Backup dev regs */
-       dr = hsotg->dr_backup;
-       if (!dr) {
-               dr = devm_kzalloc(hsotg->dev, sizeof(*dr), GFP_KERNEL);
-               if (!dr) {
-                       dev_err(hsotg->dev, "%s: can't allocate device regs\n",
-                                       __func__);
-                       return -ENOMEM;
-               }
-
-               hsotg->dr_backup = dr;
-       }
+       dr = &hsotg->dr_backup;
 
        dr->dcfg = readl(hsotg->regs + DCFG);
        dr->dctl = readl(hsotg->regs + DCTL);
@@ -195,7 +177,7 @@ static int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg)
                dr->doeptsiz[i] = readl(hsotg->regs + DOEPTSIZ(i));
                dr->doepdma[i] = readl(hsotg->regs + DOEPDMA(i));
        }
-
+       dr->valid = true;
        return 0;
 }
 
@@ -215,12 +197,13 @@ static int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg)
        dev_dbg(hsotg->dev, "%s\n", __func__);
 
        /* Restore dev regs */
-       dr = hsotg->dr_backup;
-       if (!dr) {
+       dr = &hsotg->dr_backup;
+       if (!dr->valid) {
                dev_err(hsotg->dev, "%s: no device registers to restore\n",
                                __func__);
                return -EINVAL;
        }
+       dr->valid = false;
 
        writel(dr->dcfg, hsotg->regs + DCFG);
        writel(dr->dctl, hsotg->regs + DCTL);
@@ -268,17 +251,7 @@ static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
        int i;
 
        /* Backup global regs */
-       gr = hsotg->gr_backup;
-       if (!gr) {
-               gr = devm_kzalloc(hsotg->dev, sizeof(*gr), GFP_KERNEL);
-               if (!gr) {
-                       dev_err(hsotg->dev, "%s: can't allocate global regs\n",
-                                       __func__);
-                       return -ENOMEM;
-               }
-
-               hsotg->gr_backup = gr;
-       }
+       gr = &hsotg->gr_backup;
 
        gr->gotgctl = readl(hsotg->regs + GOTGCTL);
        gr->gintmsk = readl(hsotg->regs + GINTMSK);
@@ -291,6 +264,7 @@ static int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg)
        for (i = 0; i < MAX_EPS_CHANNELS; i++)
                gr->dtxfsiz[i] = readl(hsotg->regs + DPTXFSIZN(i));
 
+       gr->valid = true;
        return 0;
 }
 
@@ -309,12 +283,13 @@ static int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg)
        dev_dbg(hsotg->dev, "%s\n", __func__);
 
        /* Restore global regs */
-       gr = hsotg->gr_backup;
-       if (!gr) {
+       gr = &hsotg->gr_backup;
+       if (!gr->valid) {
                dev_err(hsotg->dev, "%s: no global registers to restore\n",
                                __func__);
                return -EINVAL;
        }
+       gr->valid = false;
 
        writel(0xffffffff, hsotg->regs + GINTSTS);
        writel(gr->gotgctl, hsotg->regs + GOTGCTL);
index 53b8de03f1028c9982e0a403fad2d487ca09bd45..0ed87620941b5606b83f7e628439266ca7831e18 100644 (file)
@@ -492,6 +492,7 @@ struct dwc2_gregs_backup {
        u32 gdfifocfg;
        u32 dtxfsiz[MAX_EPS_CHANNELS];
        u32 gpwrdn;
+       bool valid;
 };
 
 /**
@@ -521,6 +522,7 @@ struct dwc2_dregs_backup {
        u32 doepctl[MAX_EPS_CHANNELS];
        u32 doeptsiz[MAX_EPS_CHANNELS];
        u32 doepdma[MAX_EPS_CHANNELS];
+       bool valid;
 };
 
 /**
@@ -538,6 +540,7 @@ struct dwc2_hregs_backup {
        u32 hcintmsk[MAX_EPS_CHANNELS];
        u32 hprt0;
        u32 hfir;
+       bool valid;
 };
 
 /**
@@ -705,9 +708,9 @@ struct dwc2_hsotg {
        struct work_struct wf_otg;
        struct timer_list wkp_timer;
        enum dwc2_lx_state lx_state;
-       struct dwc2_gregs_backup *gr_backup;
-       struct dwc2_dregs_backup *dr_backup;
-       struct dwc2_hregs_backup *hr_backup;
+       struct dwc2_gregs_backup gr_backup;
+       struct dwc2_dregs_backup dr_backup;
+       struct dwc2_hregs_backup hr_backup;
 
        struct dentry *debug_root;
        struct debugfs_regset32 *regset;
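
Embedding the three backup structs directly in dwc2_hsotg, each with a bool valid flag, removes a runtime allocation that could fail in the power-management path; the flag now carries the "has a backup been taken" state that the pointer's NULL-ness used to encode, and unlike the pointer it is cleared again after a restore. A minimal sketch of the shape (names hypothetical):

/* Sketch: embedded backup area with an explicit validity flag. */
struct regs_backup {
        u32  regs[8];           /* saved register values */
        bool valid;             /* set by backup, cleared by restore */
};

static int restore_regs(struct regs_backup *b)
{
        if (!b->valid)
                return -EINVAL; /* nothing has been saved yet */
        b->valid = false;       /* each backup is restored exactly once */
        /* ... write b->regs back to the hardware ... */
        return 0;
}
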
index b10377c650646560996f14b7d51e5d0cb0b90dfc..f845c41fe9e5e8c788c35d0fd7711b54debbe97e 100644 (file)
@@ -359,10 +359,9 @@ void dwc2_hcd_stop(struct dwc2_hsotg *hsotg)
 
 /* Caller must hold driver lock */
 static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
-                               struct dwc2_hcd_urb *urb, void **ep_handle,
-                               gfp_t mem_flags)
+                               struct dwc2_hcd_urb *urb, struct dwc2_qh *qh,
+                               struct dwc2_qtd *qtd)
 {
-       struct dwc2_qtd *qtd;
        u32 intr_mask;
        int retval;
        int dev_speed;
@@ -386,18 +385,15 @@ static int dwc2_hcd_urb_enqueue(struct dwc2_hsotg *hsotg,
                        return -ENODEV;
        }
 
-       qtd = kzalloc(sizeof(*qtd), mem_flags);
        if (!qtd)
-               return -ENOMEM;
+               return -EINVAL;
 
        dwc2_hcd_qtd_init(qtd, urb);
-       retval = dwc2_hcd_qtd_add(hsotg, qtd, (struct dwc2_qh **)ep_handle,
-                                 mem_flags);
+       retval = dwc2_hcd_qtd_add(hsotg, qtd, qh);
        if (retval) {
                dev_err(hsotg->dev,
                        "DWC OTG HCD URB Enqueue failed adding QTD. Error status %d\n",
                        retval);
-               kfree(qtd);
                return retval;
        }
 
@@ -2445,6 +2441,9 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
        u32 tflags = 0;
        void *buf;
        unsigned long flags;
+       struct dwc2_qh *qh;
+       bool qh_allocated = false;
+       struct dwc2_qtd *qtd;
 
        if (dbg_urb(urb)) {
                dev_vdbg(hsotg->dev, "DWC OTG HCD URB Enqueue\n");
@@ -2523,15 +2522,32 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
                                                 urb->iso_frame_desc[i].length);
 
        urb->hcpriv = dwc2_urb;
+       qh = (struct dwc2_qh *) ep->hcpriv;
+       /* Create QH for the endpoint if it doesn't exist */
+       if (!qh) {
+               qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, mem_flags);
+               if (!qh) {
+                       retval = -ENOMEM;
+                       goto fail0;
+               }
+               ep->hcpriv = qh;
+               qh_allocated = true;
+       }
+
+       qtd = kzalloc(sizeof(*qtd), mem_flags);
+       if (!qtd) {
+               retval = -ENOMEM;
+               goto fail1;
+       }
 
        spin_lock_irqsave(&hsotg->lock, flags);
        retval = usb_hcd_link_urb_to_ep(hcd, urb);
        if (retval)
-               goto fail1;
+               goto fail2;
 
-       retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, &ep->hcpriv, mem_flags);
+       retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd);
        if (retval)
-               goto fail2;
+               goto fail3;
 
        if (alloc_bandwidth) {
                dwc2_allocate_bus_bandwidth(hcd,
@@ -2543,12 +2559,25 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
 
        return 0;
 
-fail2:
+fail3:
        dwc2_urb->priv = NULL;
        usb_hcd_unlink_urb_from_ep(hcd, urb);
-fail1:
+fail2:
        spin_unlock_irqrestore(&hsotg->lock, flags);
        urb->hcpriv = NULL;
+       kfree(qtd);
+fail1:
+       if (qh_allocated) {
+               struct dwc2_qtd *qtd2, *qtd2_tmp;
+
+               ep->hcpriv = NULL;
+               dwc2_hcd_qh_unlink(hsotg, qh);
+               /* Free each QTD in the QH's QTD list */
+               list_for_each_entry_safe(qtd2, qtd2_tmp, &qh->qtd_list,
+                                                        qtd_list_entry)
+                       dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh);
+               dwc2_hcd_qh_free(hsotg, qh);
+       }
 fail0:
        kfree(dwc2_urb);
 
index 7b5841c4003301b349b499fdfb500e860ba86c74..fc1054965552aca51fa7f9bcdf87bae769077437 100644 (file)
@@ -463,6 +463,9 @@ extern void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg,
 /* Schedule Queue Functions */
 /* Implemented in hcd_queue.c */
 extern void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg);
+extern struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
+                                         struct dwc2_hcd_urb *urb,
+                                         gfp_t mem_flags);
 extern void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
 extern int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
 extern void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh);
@@ -471,7 +474,7 @@ extern void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
 
 extern void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb);
 extern int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
-                           struct dwc2_qh **qh, gfp_t mem_flags);
+                           struct dwc2_qh *qh);
 
 /* Unlinks and frees a QTD */
 static inline void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg,
index 9b5c36256627b0b163fb25fc7549c46e1944ed35..3ad63d392e13f51c5e9aa8a31069de3795cdf86f 100644 (file)
@@ -191,7 +191,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
  *
  * Return: Pointer to the newly allocated QH, or NULL on error
  */
-static struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
+struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
                                          struct dwc2_hcd_urb *urb,
                                          gfp_t mem_flags)
 {
@@ -767,57 +767,32 @@ void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
  *
  * @hsotg:        The DWC HCD structure
  * @qtd:          The QTD to add
- * @qh:           Out parameter to return queue head
- * @atomic_alloc: Flag to do atomic alloc if needed
+ * @qh:           Queue head to add qtd to
  *
  * Return: 0 if successful, negative error code otherwise
  *
- * Finds the correct QH to place the QTD into. If it does not find a QH, it
- * will create a new QH. If the QH to which the QTD is added is not currently
- * scheduled, it is placed into the proper schedule based on its EP type.
+ * If the QH to which the QTD is added is not currently scheduled, it is placed
+ * into the proper schedule based on its EP type.
  */
 int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
-                    struct dwc2_qh **qh, gfp_t mem_flags)
+                    struct dwc2_qh *qh)
 {
-       struct dwc2_hcd_urb *urb = qtd->urb;
-       int allocated = 0;
        int retval;
 
-       /*
-        * Get the QH which holds the QTD-list to insert to. Create QH if it
-        * doesn't exist.
-        */
-       if (*qh == NULL) {
-               *qh = dwc2_hcd_qh_create(hsotg, urb, mem_flags);
-               if (*qh == NULL)
-                       return -ENOMEM;
-               allocated = 1;
+       if (unlikely(!qh)) {
+               dev_err(hsotg->dev, "%s: Invalid QH\n", __func__);
+               retval = -EINVAL;
+               goto fail;
        }
 
-       retval = dwc2_hcd_qh_add(hsotg, *qh);
+       retval = dwc2_hcd_qh_add(hsotg, qh);
        if (retval)
                goto fail;
 
-       qtd->qh = *qh;
-       list_add_tail(&qtd->qtd_list_entry, &(*qh)->qtd_list);
+       qtd->qh = qh;
+       list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);
 
        return 0;
-
 fail:
-       if (allocated) {
-               struct dwc2_qtd *qtd2, *qtd2_tmp;
-               struct dwc2_qh *qh_tmp = *qh;
-
-               *qh = NULL;
-               dwc2_hcd_qh_unlink(hsotg, qh_tmp);
-
-               /* Free each QTD in the QH's QTD list */
-               list_for_each_entry_safe(qtd2, qtd2_tmp, &qh_tmp->qtd_list,
-                                        qtd_list_entry)
-                       dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh_tmp);
-
-               dwc2_hcd_qh_free(hsotg, qh_tmp);
-       }
-
        return retval;
 }
index 5c110d8e293b534df24ff45f784f02a3d84d5644..ff5773c66b84f22b8ffabe986546d2810d0a9864 100644 (file)
@@ -446,10 +446,12 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
        /* Select the HS PHY interface */
        switch (DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3)) {
        case DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI:
-               if (!strncmp(dwc->hsphy_interface, "utmi", 4)) {
+               if (dwc->hsphy_interface &&
+                               !strncmp(dwc->hsphy_interface, "utmi", 4)) {
                        reg &= ~DWC3_GUSB2PHYCFG_ULPI_UTMI;
                        break;
-               } else if (!strncmp(dwc->hsphy_interface, "ulpi", 4)) {
+               } else if (dwc->hsphy_interface &&
+                               !strncmp(dwc->hsphy_interface, "ulpi", 4)) {
                        reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI;
                        dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
                } else {
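
dwc->hsphy_interface comes from an optional property lookup and may be NULL,
while strncmp() dereferences both arguments unconditionally, hence the added
guards. The same check as a one-line helper (hypothetical, not part of the
patch):

    static inline bool hsphy_if_is(const char *iface, const char *name)
    {
            /* guard first: strncmp(NULL, ...) would oops */
            return iface && !strncmp(iface, name, 4);
    }
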
index 2ef3c8d6a9dbd3b5b8270cb5af230d793e0d7230..69e769c35cf5dc798fb34a6924c19726690592a2 100644 (file)
@@ -727,6 +727,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
                dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
                ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
                break;
+       case USB_REQ_SET_INTERFACE:
+               dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
+               dwc->start_config_issued = false;
+               /* Fall through */
        default:
                dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
                ret = dwc3_ep0_delegate_req(dwc, ctrl);
index 4e3447bbd0976e42eda4ec2a5d71f6cb4e9f75c9..58b4657fc721d3dabda76b2138ea6c30d539992c 100644 (file)
@@ -1758,10 +1758,13 @@ unknown:
                 * take such requests too, if that's ever needed:  to work
                 * in config 0, etc.
                 */
-               list_for_each_entry(f, &cdev->config->functions, list)
-                       if (f->req_match && f->req_match(f, ctrl))
-                               goto try_fun_setup;
-               f = NULL;
+               if (cdev->config) {
+                       list_for_each_entry(f, &cdev->config->functions, list)
+                               if (f->req_match && f->req_match(f, ctrl))
+                                       goto try_fun_setup;
+                       f = NULL;
+               }
+
                switch (ctrl->bRequestType & USB_RECIP_MASK) {
                case USB_RECIP_INTERFACE:
                        if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
index 0495c94a23d7e96a9c3554b99893b6b0f63e7302..289e20119fea25eb1d670c2abbef7ca3f671633e 100644 (file)
@@ -571,7 +571,7 @@ static struct config_group *function_make(
        if (IS_ERR(fi))
                return ERR_CAST(fi);
 
-       ret = config_item_set_name(&fi->group.cg_item, name);
+       ret = config_item_set_name(&fi->group.cg_item, "%s", name);
        if (ret) {
                usb_put_function_instance(fi);
                return ERR_PTR(ret);
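
The one-line change above is a format-string fix: config_item_set_name()
takes a printf-style format, so passing the user-controlled configfs name
directly would let any '%' sequences in it be parsed as conversion
specifiers, while "%s" turns the name into plain data. Illustrated with a
hypothetical name value:

    /* suppose a user creates a configfs directory literally named "a%sb" */
    config_item_set_name(item, name);        /* "%s" reads a missing vararg */
    config_item_set_name(item, "%s", name);  /* '%' is now ordinary data */
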
index 45b8c8b338df7a877cebe8ef6743a675c31aa31e..6e7be91e6097cf0cfb4d6106decc141aeb8a5eb3 100644 (file)
@@ -924,7 +924,8 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
 
        kiocb->private = p;
 
-       kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
+       if (p->aio)
+               kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
 
        res = ffs_epfile_io(kiocb->ki_filp, p);
        if (res == -EIOCBQUEUED)
@@ -968,7 +969,8 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
 
        kiocb->private = p;
 
-       kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
+       if (p->aio)
+               kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
 
        res = ffs_epfile_io(kiocb->ki_filp, p);
        if (res == -EIOCBQUEUED)
index d2259c6639960c55c76239401148bbcc2ec1f449..f936268d26c6aaa9a5601614adf2e90319e0d364 100644 (file)
@@ -2786,7 +2786,7 @@ int fsg_common_set_nluns(struct fsg_common *common, int nluns)
                return -EINVAL;
        }
 
-       curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL);
+       curlun = kcalloc(FSG_MAX_LUNS, sizeof(*curlun), GFP_KERNEL);
        if (unlikely(!curlun))
                return -ENOMEM;
 
@@ -2796,8 +2796,6 @@ int fsg_common_set_nluns(struct fsg_common *common, int nluns)
        common->luns = curlun;
        common->nluns = nluns;
 
-       pr_info("Number of LUNs=%d\n", common->nluns);
-
        return 0;
 }
 EXPORT_SYMBOL_GPL(fsg_common_set_nluns);
@@ -3563,14 +3561,26 @@ static struct usb_function *fsg_alloc(struct usb_function_instance *fi)
        struct fsg_opts *opts = fsg_opts_from_func_inst(fi);
        struct fsg_common *common = opts->common;
        struct fsg_dev *fsg;
+       unsigned nluns, i;
 
        fsg = kzalloc(sizeof(*fsg), GFP_KERNEL);
        if (unlikely(!fsg))
                return ERR_PTR(-ENOMEM);
 
        mutex_lock(&opts->lock);
+       if (!opts->refcnt) {
+               for (nluns = i = 0; i < FSG_MAX_LUNS; ++i)
+                       if (common->luns[i])
+                               nluns = i + 1;
+               if (!nluns)
+                       pr_warn("No LUNS defined, continuing anyway\n");
+               else
+                       common->nluns = nluns;
+               pr_info("Number of LUNs=%u\n", common->nluns);
+       }
        opts->refcnt++;
        mutex_unlock(&opts->lock);
+
        fsg->function.name      = FSG_DRIVER_DESC;
        fsg->function.bind      = fsg_bind;
        fsg->function.unbind    = fsg_unbind;
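
With the LUN array now always sized FSG_MAX_LUNS, the loop added to
fsg_alloc() derives the effective LUN count from the highest populated slot,
so sparse configfs layouts keep their numbering. A worked example with
hypothetical contents:

    /* luns[] = { lun0, NULL, lun2, NULL, ..., NULL }
     * i == 0: luns[0] set -> nluns = 1
     * i == 2: luns[2] set -> nluns = 3
     * result: nluns = 3, preserving the hole at index 1
     */
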
index 6316aa5b1c4947a6df2e08b4c45856dc77b94374..ad50a67c14656aa70f6804572486954f0e389887 100644 (file)
@@ -1145,7 +1145,7 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
        if (opts->id && !midi->id) {
                status = -ENOMEM;
                mutex_unlock(&opts->lock);
-               goto kstrdup_fail;
+               goto setup_fail;
        }
        midi->in_ports = opts->in_ports;
        midi->out_ports = opts->out_ports;
@@ -1164,8 +1164,6 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi)
 
        return &midi->func;
 
-kstrdup_fail:
-       f_midi_unregister_card(midi);
 setup_fail:
        for (--i; i >= 0; i--)
                kfree(midi->in_port[i]);
index e547ea7f56b13375095031a42d4c40757f64015e..1137e3384218c11e738b88067f01fd8ee4105f25 100644 (file)
@@ -1171,7 +1171,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
                          udc_name, fotg210);
        if (ret < 0) {
                pr_err("request_irq error (%d)\n", ret);
-               goto err_irq;
+               goto err_req;
        }
 
        ret = usb_add_gadget_udc(&pdev->dev, &fotg210->gadget);
@@ -1183,7 +1183,6 @@ static int fotg210_udc_probe(struct platform_device *pdev)
        return 0;
 
 err_add_udc:
-err_irq:
        free_irq(ires->start, fotg210);
 
 err_req:
index d32160d6463f5fd3ca16442cdb37f0f0289fb5dc..5da37c957b53ce34bd9820ebfd57b9f20a5815c4 100644 (file)
@@ -2167,7 +2167,7 @@ static int mv_udc_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       udc->phy_regs = ioremap(r->start, resource_size(r));
+       udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
        if (udc->phy_regs == NULL) {
                dev_err(&pdev->dev, "failed to map phy I/O memory\n");
                return -EBUSY;
index d69c35558f6852beecd1bfc85acee35c26781ec8..362ee8af5fce87df4a2e7779f743c2ec722d9a5c 100644 (file)
@@ -60,13 +60,15 @@ static DEFINE_MUTEX(udc_lock);
 int usb_gadget_map_request(struct usb_gadget *gadget,
                struct usb_request *req, int is_in)
 {
+       struct device *dev = gadget->dev.parent;
+
        if (req->length == 0)
                return 0;
 
        if (req->num_sgs) {
                int     mapped;
 
-               mapped = dma_map_sg(&gadget->dev, req->sg, req->num_sgs,
+               mapped = dma_map_sg(dev, req->sg, req->num_sgs,
                                is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
                if (mapped == 0) {
                        dev_err(&gadget->dev, "failed to map SGs\n");
@@ -75,11 +77,11 @@ int usb_gadget_map_request(struct usb_gadget *gadget,
 
                req->num_mapped_sgs = mapped;
        } else {
-               req->dma = dma_map_single(&gadget->dev, req->buf, req->length,
+               req->dma = dma_map_single(dev, req->buf, req->length,
                                is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
-               if (dma_mapping_error(&gadget->dev, req->dma)) {
-                       dev_err(&gadget->dev, "failed to map buffer\n");
+               if (dma_mapping_error(dev, req->dma)) {
+                       dev_err(dev, "failed to map buffer\n");
                        return -EFAULT;
                }
        }
@@ -95,12 +97,12 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget,
                return;
 
        if (req->num_mapped_sgs) {
-               dma_unmap_sg(&gadget->dev, req->sg, req->num_mapped_sgs,
+               dma_unmap_sg(gadget->dev.parent, req->sg, req->num_mapped_sgs,
                                is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
                req->num_mapped_sgs = 0;
        } else {
-               dma_unmap_single(&gadget->dev, req->dma, req->length,
+               dma_unmap_single(gadget->dev.parent, req->dma, req->length,
                                is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
        }
 }
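
The mapping calls above switch from the gadget's own struct device to
gadget->dev.parent because the gadget device is a logical child without a
dma_mask or DMA ops; the parent is the UDC's platform or PCI device that the
DMA layer actually knows about. The pattern in isolation, as a sketch:

    struct device *dma_dev = gadget->dev.parent;  /* physical controller */

    req->dma = dma_map_single(dma_dev, req->buf, req->length, DMA_TO_DEVICE);
    if (dma_mapping_error(dma_dev, req->dma))     /* check the same device */
            return -EFAULT;
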
index f7d561ed3c236290aa90334b90bd52cbfa9f7632..d029bbe9eb36a884fed45fd6f648bfca51199d65 100644 (file)
@@ -981,10 +981,6 @@ rescan_all:
                int                     completed, modified;
                __hc32                  *prev;
 
-               /* Is this ED already invisible to the hardware? */
-               if (ed->state == ED_IDLE)
-                       goto ed_idle;
-
                /* only take off EDs that the HC isn't using, accounting for
                 * frame counter wraps and EDs with partially retired TDs
                 */
@@ -1012,12 +1008,10 @@ skip_ed:
                }
 
                /* ED's now officially unlinked, hc doesn't see */
-               ed->state = ED_IDLE;
                ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
                ed->hwNextED = 0;
                wmb();
                ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);
-ed_idle:
 
                /* reentrancy:  if we drop the schedule lock, someone might
                 * have modified this list.  normally it's just prepending
@@ -1088,6 +1082,7 @@ rescan_this:
                if (list_empty(&ed->td_list)) {
                        *last = ed->ed_next;
                        ed->ed_next = NULL;
+                       ed->state = ED_IDLE;
                        list_del(&ed->in_use_list);
                } else if (ohci->rh_state == OHCI_RH_RUNNING) {
                        *last = ed->ed_next;
index e9a6eec39142584032f777aa6101c00604b5ccf5..cfcfadfc94fc25b8e10d788e59514c146d32ccc1 100644 (file)
@@ -58,7 +58,7 @@
 #define CCR_PM_CKRNEN    0x0002
 #define CCR_PM_USBPW1    0x0004
 #define CCR_PM_USBPW2    0x0008
-#define CCR_PM_USBPW3    0x0008
+#define CCR_PM_USBPW3    0x0010
 #define CCR_PM_PMEE      0x0100
 #define CCR_PM_PMES      0x8000
 
index e75c565feb53ef3022312c047268367de71ae059..78241b5550df877fb09189936867f7821e09d11a 100644 (file)
@@ -484,10 +484,13 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
        u32 pls = status_reg & PORT_PLS_MASK;
 
        /* resume state is an xHCI internal state.
-        * Do not report it to usb core.
+        * Do not report it to the usb core; instead report U3 so the
+        * usb core knows the port is not ready for transfer
         */
-       if (pls == XDEV_RESUME)
+       if (pls == XDEV_RESUME) {
+               *status |= USB_SS_PORT_LS_U3;
                return;
+       }
 
        /* When the CAS bit is set then warm reset
         * should be performed on port
@@ -588,7 +591,14 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
                status |= USB_PORT_STAT_C_RESET << 16;
        /* USB3.0 only */
        if (hcd->speed == HCD_USB3) {
-               if ((raw_port_status & PORT_PLC))
+               /* A port link change with the port in resume state should not
+                * be reported to usbcore, as this is an internal state to be
+                * handled by the xhci driver. If usbcore were to clear PLC
+                * first, the port change event irq would not be generated.
+                */
+               if ((raw_port_status & PORT_PLC) &&
+                       (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME)
                        status |= USB_PORT_STAT_C_LINK_STATE << 16;
                if ((raw_port_status & PORT_WRC))
                        status |= USB_PORT_STAT_C_BH_RESET << 16;
@@ -1120,10 +1130,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
        spin_lock_irqsave(&xhci->lock, flags);
 
        if (hcd->self.root_hub->do_remote_wakeup) {
-               if (bus_state->resuming_ports) {
+               if (bus_state->resuming_ports ||        /* USB2 */
+                   bus_state->port_remote_wakeup) {    /* USB3 */
                        spin_unlock_irqrestore(&xhci->lock, flags);
-                       xhci_dbg(xhci, "suspend failed because "
-                                               "a port is resuming\n");
+                       xhci_dbg(xhci, "suspend failed because a port is resuming\n");
                        return -EBUSY;
                }
        }
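
bus_state->port_remote_wakeup is a per-port bitmask set when a USB3 port
signals resume and cleared once the resume completes (or, per the
handle_port_status hunk further down, when the port goes inactive);
xhci_bus_suspend() now refuses to suspend while any bit is still set. The
bookkeeping in miniature, with a hypothetical port index:

    bus_state->port_remote_wakeup |= 1 << port;    /* USB3 resume signalled */
    /* ... */
    bus_state->port_remote_wakeup &= ~(1 << port); /* done, or device gone */

    if (bus_state->port_remote_wakeup)             /* a port still resuming */
            return -EBUSY;                         /* refuse bus suspend */
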
index f8336408ef07c4354ad54c43e988b6b43272eea0..3e442f77a2b9367c5bd90d2ab69beb2db841fd26 100644 (file)
@@ -1427,10 +1427,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
                /* Attempt to use the ring cache */
                if (virt_dev->num_rings_cached == 0)
                        return -ENOMEM;
+               virt_dev->num_rings_cached--;
                virt_dev->eps[ep_index].new_ring =
                        virt_dev->ring_cache[virt_dev->num_rings_cached];
                virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
-               virt_dev->num_rings_cached--;
                xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
                                        1, type);
        }
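
The reordering above fixes an off-by-one: with num_rings_cached == N the
filled slots are ring_cache[0..N-1], and the old code read slot N, one past
the last entry, before decrementing. In miniature:

    ring = cache[n]; n--;   /* old: reads slot n, past the last filled slot */
    n--; ring = cache[n];   /* new: reads slot n-1, the last filled slot */
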
index 4a4cb1d91ac8465d5ff274f5ded1a62e299cf92f..5590eac2b22df26ea4150d7bb8a8e1eeb2406ae3 100644 (file)
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/acpi.h>
 
 #include "xhci.h"
 #include "xhci-trace.h"
 
+#define PORT2_SSIC_CONFIG_REG2 0x883c
+#define PROG_DONE              (1 << 30)
+#define SSIC_PORT_UNUSED       (1 << 31)
+
 /* Device for a quirk */
 #define PCI_VENDOR_ID_FRESCO_LOGIC     0x1b73
 #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
@@ -176,20 +181,63 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 }
 
 /*
+ * In some Intel xHCI controllers, to get D3 working, the SSIC port
+ * needs to be marked as "unused" through a vendor-specific SSIC CONFIG
+ * register at offset 0x883c before putting the xHCI into D3; after D3
+ * exit, the port needs to be marked as "used" again. Without this,
+ * the xHCI might not enter the D3 state.
  * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
  * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
  */
-static void xhci_pme_quirk(struct xhci_hcd *xhci)
+static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
 {
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       struct pci_dev          *pdev = to_pci_dev(hcd->self.controller);
        u32 val;
        void __iomem *reg;
 
+       if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+                pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
+
+               reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
+
+               /* Notify SSIC that SSIC profile programming is not done */
+               val = readl(reg) & ~PROG_DONE;
+               writel(val, reg);
+
+               /* Mark SSIC port as unused(suspend) or used(resume) */
+               val = readl(reg);
+               if (suspend)
+                       val |= SSIC_PORT_UNUSED;
+               else
+                       val &= ~SSIC_PORT_UNUSED;
+               writel(val, reg);
+
+               /* Notify SSIC that SSIC profile programming is done */
+               val = readl(reg) | PROG_DONE;
+               writel(val, reg);
+               readl(reg);
+       }
+
        reg = (void __iomem *) xhci->cap_regs + 0x80a4;
        val = readl(reg);
        writel(val | BIT(28), reg);
        readl(reg);
 }
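
The SSIC sequence added above follows a program-done handshake: clear
PROG_DONE to open a programming window, flip SSIC_PORT_UNUSED according to
suspend or resume, set PROG_DONE to commit, then read the register back so
the posted MMIO writes are flushed before the power transition. Schematically:

    val = readl(reg) & ~PROG_DONE;  writel(val, reg);  /* open window */
    /* set or clear SSIC_PORT_UNUSED here */
    val = readl(reg) | PROG_DONE;   writel(val, reg);  /* commit */
    readl(reg);                                        /* flush posting */
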
 
+#ifdef CONFIG_ACPI
+static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
+{
+       static const u8 intel_dsm_uuid[] = {
+               0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45,
+               0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
+       };
+       acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL);
+}
+#else
+static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
+#endif /* CONFIG_ACPI */
+
 /* called during probe() after chip reset completes */
 static int xhci_pci_setup(struct usb_hcd *hcd)
 {
@@ -263,6 +311,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
                        HCC_MAX_PSA(xhci->hcc_params) >= 4)
                xhci->shared_hcd->can_do_streams = 1;
 
+       if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
+               xhci_pme_acpi_rtd3_enable(dev);
+
        /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
        pm_runtime_put_noidle(&dev->dev);
 
@@ -307,7 +358,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
                pdev->no_d3cold = true;
 
        if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
-               xhci_pme_quirk(xhci);
+               xhci_pme_quirk(hcd, true);
 
        return xhci_suspend(xhci, do_wakeup);
 }
@@ -340,7 +391,7 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
                usb_enable_intel_xhci_ports(pdev);
 
        if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
-               xhci_pme_quirk(xhci);
+               xhci_pme_quirk(hcd, false);
 
        retval = xhci_resume(xhci, hibernated);
        return retval;
index 94416ff7081071e3f32a682493e6a34bb8d359a8..6a8fc52aed5863391885ac11c2661a407f707a30 100644 (file)
@@ -1546,6 +1546,9 @@ static void handle_port_status(struct xhci_hcd *xhci,
                usb_hcd_resume_root_hub(hcd);
        }
 
+       if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
+               bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
+
        if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
                xhci_dbg(xhci, "port resume event for port %d\n", port_id);
 
index 7da0d6043d33e13afa3ce4db45d5dfa54b4d3d22..526ebc0c7e720b9d766bcf6abf1bc65672e584bb 100644 (file)
@@ -3453,6 +3453,9 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
                        return -EINVAL;
        }
 
+       if (virt_dev->tt_info)
+               old_active_eps = virt_dev->tt_info->active_eps;
+
        if (virt_dev->udev != udev) {
                /* If the virt_dev and the udev do not match, this virt_dev
                 * may belong to another udev.
index 31e46cc55807a83a69f9e6e8ce9e89f176786215..ed2ebf647c380ebbdfe647544fb137283219cf87 100644 (file)
@@ -285,6 +285,7 @@ struct xhci_op_regs {
 #define XDEV_U0                (0x0 << 5)
 #define XDEV_U2                (0x2 << 5)
 #define XDEV_U3                (0x3 << 5)
+#define XDEV_INACTIVE  (0x6 << 5)
 #define XDEV_RESUME    (0xf << 5)
 /* true: port has power (see HCC_PPC) */
 #define PORT_POWER     (1 << 9)
index 30842bc195f57fced81ff6190f4758570ee77d3e..92d5f718659b7ebbeba0898428de681af18fdf2a 100644 (file)
@@ -275,9 +275,7 @@ static int musb_has_gadget(struct musb *musb)
 #ifdef CONFIG_USB_MUSB_HOST
        return 1;
 #else
-       if (musb->port_mode == MUSB_PORT_MODE_HOST)
-               return 1;
-       return musb->g.dev.driver != NULL;
+       return musb->port_mode == MUSB_PORT_MODE_HOST;
 #endif
 }
 
index 8f7cb068d29bab1eb1f9641e41cbe4da30e22489..3fcc0483a0811e8f7e2984aea1c4a11ce2da0951 100644 (file)
@@ -217,6 +217,9 @@ static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy)
 {
        unsigned int vbus_value;
 
+       if (!mxs_phy->regmap_anatop)
+               return false;
+
        if (mxs_phy->port_id == 0)
                regmap_read(mxs_phy->regmap_anatop,
                        ANADIG_USB1_VBUS_DET_STAT,
index ffd739e31bfc193b058628560e86ea6f9b96f375..eac7ccaa3c859cc3bae90e897fc071caa7168cfd 100644 (file)
@@ -187,6 +187,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
        { USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
        { USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
+       { USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */
        { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
        { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
        { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
index 4f70df33975a4a9bf02d3945e8b2bb956b1e7cd6..78b4f64c6b00e457cef0bc65ed636a3e9fa8db42 100644 (file)
@@ -121,26 +121,26 @@ static DEFINE_SPINLOCK(release_lock);
 static const unsigned int dummy; /* for clarity in register access fns */
 
 enum mos_regs {
-       THR,              /* serial port regs */
-       RHR,
-       IER,
-       FCR,
-       ISR,
-       LCR,
-       MCR,
-       LSR,
-       MSR,
-       SPR,
-       DLL,
-       DLM,
-       DPR,              /* parallel port regs */
-       DSR,
-       DCR,
-       ECR,
-       SP1_REG,          /* device control regs */
-       SP2_REG,          /* serial port 2 (7720 only) */
-       PP_REG,
-       SP_CONTROL_REG,
+       MOS7720_THR,              /* serial port regs */
+       MOS7720_RHR,
+       MOS7720_IER,
+       MOS7720_FCR,
+       MOS7720_ISR,
+       MOS7720_LCR,
+       MOS7720_MCR,
+       MOS7720_LSR,
+       MOS7720_MSR,
+       MOS7720_SPR,
+       MOS7720_DLL,
+       MOS7720_DLM,
+       MOS7720_DPR,              /* parallel port regs */
+       MOS7720_DSR,
+       MOS7720_DCR,
+       MOS7720_ECR,
+       MOS7720_SP1_REG,          /* device control regs */
+       MOS7720_SP2_REG,          /* serial port 2 (7720 only) */
+       MOS7720_PP_REG,
+       MOS7720_SP_CONTROL_REG,
 };
 
 /*
@@ -150,26 +150,26 @@ enum mos_regs {
 static inline __u16 get_reg_index(enum mos_regs reg)
 {
        static const __u16 mos7715_index_lookup_table[] = {
-               0x00,           /* THR */
-               0x00,           /* RHR */
-               0x01,           /* IER */
-               0x02,           /* FCR */
-               0x02,           /* ISR */
-               0x03,           /* LCR */
-               0x04,           /* MCR */
-               0x05,           /* LSR */
-               0x06,           /* MSR */
-               0x07,           /* SPR */
-               0x00,           /* DLL */
-               0x01,           /* DLM */
-               0x00,           /* DPR */
-               0x01,           /* DSR */
-               0x02,           /* DCR */
-               0x0a,           /* ECR */
-               0x01,           /* SP1_REG */
-               0x02,           /* SP2_REG (7720 only) */
-               0x04,           /* PP_REG (7715 only) */
-               0x08,           /* SP_CONTROL_REG */
+               0x00,           /* MOS7720_THR */
+               0x00,           /* MOS7720_RHR */
+               0x01,           /* MOS7720_IER */
+               0x02,           /* MOS7720_FCR */
+               0x02,           /* MOS7720_ISR */
+               0x03,           /* MOS7720_LCR */
+               0x04,           /* MOS7720_MCR */
+               0x05,           /* MOS7720_LSR */
+               0x06,           /* MOS7720_MSR */
+               0x07,           /* MOS7720_SPR */
+               0x00,           /* MOS7720_DLL */
+               0x01,           /* MOS7720_DLM */
+               0x00,           /* MOS7720_DPR */
+               0x01,           /* MOS7720_DSR */
+               0x02,           /* MOS7720_DCR */
+               0x0a,           /* MOS7720_ECR */
+               0x01,           /* MOS7720_SP1_REG */
+               0x02,           /* MOS7720_SP2_REG (7720 only) */
+               0x04,           /* MOS7720_PP_REG (7715 only) */
+               0x08,           /* MOS7720_SP_CONTROL_REG */
        };
        return mos7715_index_lookup_table[reg];
 }
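
The bulk of this file's diff is a mechanical rename giving the driver-local
register enum a MOS7720_ prefix: short bare names such as THR, LCR, or MCR
are common macro names in serial headers, and a macro with the same name
would expand inside the enum and break the build. A minimal reproduction of
the clash, with a hypothetical header:

    #define LCR 0x03                 /* from some unrelated serial header */
    enum mos_regs { THR, RHR, LCR }; /* preprocesses to
                                      * 'enum mos_regs { THR, RHR, 0x03 };'
                                      * which is a syntax error */

Prefixed names such as MOS7720_LCR cannot collide with those macros.
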
@@ -181,10 +181,10 @@ static inline __u16 get_reg_index(enum mos_regs reg)
 static inline __u16 get_reg_value(enum mos_regs reg,
                                  unsigned int serial_portnum)
 {
-       if (reg >= SP1_REG)           /* control reg */
+       if (reg >= MOS7720_SP1_REG)     /* control reg */
                return 0x0000;
 
-       else if (reg >= DPR)          /* parallel port reg (7715 only) */
+       else if (reg >= MOS7720_DPR)    /* parallel port reg (7715 only) */
                return 0x0100;
 
        else                          /* serial port reg */
@@ -252,7 +252,8 @@ static inline int mos7715_change_mode(struct mos7715_parport *mos_parport,
                                      enum mos7715_pp_modes mode)
 {
        mos_parport->shadowECR = mode;
-       write_mos_reg(mos_parport->serial, dummy, ECR, mos_parport->shadowECR);
+       write_mos_reg(mos_parport->serial, dummy, MOS7720_ECR,
+                     mos_parport->shadowECR);
        return 0;
 }
 
@@ -486,7 +487,7 @@ static void parport_mos7715_write_data(struct parport *pp, unsigned char d)
        if (parport_prologue(pp) < 0)
                return;
        mos7715_change_mode(mos_parport, SPP);
-       write_mos_reg(mos_parport->serial, dummy, DPR, (__u8)d);
+       write_mos_reg(mos_parport->serial, dummy, MOS7720_DPR, (__u8)d);
        parport_epilogue(pp);
 }
 
@@ -497,7 +498,7 @@ static unsigned char parport_mos7715_read_data(struct parport *pp)
 
        if (parport_prologue(pp) < 0)
                return 0;
-       read_mos_reg(mos_parport->serial, dummy, DPR, &d);
+       read_mos_reg(mos_parport->serial, dummy, MOS7720_DPR, &d);
        parport_epilogue(pp);
        return d;
 }
@@ -510,7 +511,7 @@ static void parport_mos7715_write_control(struct parport *pp, unsigned char d)
        if (parport_prologue(pp) < 0)
                return;
        data = ((__u8)d & 0x0f) | (mos_parport->shadowDCR & 0xf0);
-       write_mos_reg(mos_parport->serial, dummy, DCR, data);
+       write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR, data);
        mos_parport->shadowDCR = data;
        parport_epilogue(pp);
 }
@@ -543,7 +544,8 @@ static unsigned char parport_mos7715_frob_control(struct parport *pp,
        if (parport_prologue(pp) < 0)
                return 0;
        mos_parport->shadowDCR = (mos_parport->shadowDCR & (~mask)) ^ val;
-       write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR);
+       write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
+                     mos_parport->shadowDCR);
        dcr = mos_parport->shadowDCR & 0x0f;
        parport_epilogue(pp);
        return dcr;
@@ -581,7 +583,8 @@ static void parport_mos7715_data_forward(struct parport *pp)
                return;
        mos7715_change_mode(mos_parport, PS2);
        mos_parport->shadowDCR &=  ~0x20;
-       write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR);
+       write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
+                     mos_parport->shadowDCR);
        parport_epilogue(pp);
 }
 
@@ -593,7 +596,8 @@ static void parport_mos7715_data_reverse(struct parport *pp)
                return;
        mos7715_change_mode(mos_parport, PS2);
        mos_parport->shadowDCR |= 0x20;
-       write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR);
+       write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
+                     mos_parport->shadowDCR);
        parport_epilogue(pp);
 }
 
@@ -633,8 +637,10 @@ static void parport_mos7715_restore_state(struct parport *pp,
                spin_unlock(&release_lock);
                return;
        }
-       write_parport_reg_nonblock(mos_parport, DCR, mos_parport->shadowDCR);
-       write_parport_reg_nonblock(mos_parport, ECR, mos_parport->shadowECR);
+       write_parport_reg_nonblock(mos_parport, MOS7720_DCR,
+                                  mos_parport->shadowDCR);
+       write_parport_reg_nonblock(mos_parport, MOS7720_ECR,
+                                  mos_parport->shadowECR);
        spin_unlock(&release_lock);
 }
 
@@ -714,14 +720,16 @@ static int mos7715_parport_init(struct usb_serial *serial)
        init_completion(&mos_parport->syncmsg_compl);
 
        /* cycle parallel port reset bit */
-       write_mos_reg(mos_parport->serial, dummy, PP_REG, (__u8)0x80);
-       write_mos_reg(mos_parport->serial, dummy, PP_REG, (__u8)0x00);
+       write_mos_reg(mos_parport->serial, dummy, MOS7720_PP_REG, (__u8)0x80);
+       write_mos_reg(mos_parport->serial, dummy, MOS7720_PP_REG, (__u8)0x00);
 
        /* initialize device registers */
        mos_parport->shadowDCR = DCR_INIT_VAL;
-       write_mos_reg(mos_parport->serial, dummy, DCR, mos_parport->shadowDCR);
+       write_mos_reg(mos_parport->serial, dummy, MOS7720_DCR,
+                     mos_parport->shadowDCR);
        mos_parport->shadowECR = ECR_INIT_VAL;
-       write_mos_reg(mos_parport->serial, dummy, ECR, mos_parport->shadowECR);
+       write_mos_reg(mos_parport->serial, dummy, MOS7720_ECR,
+                     mos_parport->shadowECR);
 
        /* register with parport core */
        mos_parport->pp = parport_register_port(0, PARPORT_IRQ_NONE,
@@ -1033,45 +1041,49 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
         /* Initialize MCS7720 -- Write Init values to corresponding Registers
          *
          * Register Index
-         * 0 : THR/RHR
-         * 1 : IER
-         * 2 : FCR
-         * 3 : LCR
-         * 4 : MCR
-         * 5 : LSR
-         * 6 : MSR
-         * 7 : SPR
+         * 0 : MOS7720_THR/MOS7720_RHR
+         * 1 : MOS7720_IER
+         * 2 : MOS7720_FCR
+         * 3 : MOS7720_LCR
+         * 4 : MOS7720_MCR
+         * 5 : MOS7720_LSR
+         * 6 : MOS7720_MSR
+         * 7 : MOS7720_SPR
          *
          * 0x08 : SP1/2 Control Reg
          */
        port_number = port->port_number;
-       read_mos_reg(serial, port_number, LSR, &data);
+       read_mos_reg(serial, port_number, MOS7720_LSR, &data);
 
        dev_dbg(&port->dev, "SS::%p LSR:%x\n", mos7720_port, data);
 
-       write_mos_reg(serial, dummy, SP1_REG, 0x02);
-       write_mos_reg(serial, dummy, SP2_REG, 0x02);
+       write_mos_reg(serial, dummy, MOS7720_SP1_REG, 0x02);
+       write_mos_reg(serial, dummy, MOS7720_SP2_REG, 0x02);
 
-       write_mos_reg(serial, port_number, IER, 0x00);
-       write_mos_reg(serial, port_number, FCR, 0x00);
+       write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
+       write_mos_reg(serial, port_number, MOS7720_FCR, 0x00);
 
-       write_mos_reg(serial, port_number, FCR, 0xcf);
+       write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf);
        mos7720_port->shadowLCR = 0x03;
-       write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
+       write_mos_reg(serial, port_number, MOS7720_LCR,
+                     mos7720_port->shadowLCR);
        mos7720_port->shadowMCR = 0x0b;
-       write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
+       write_mos_reg(serial, port_number, MOS7720_MCR,
+                     mos7720_port->shadowMCR);
 
-       write_mos_reg(serial, port_number, SP_CONTROL_REG, 0x00);
-       read_mos_reg(serial, dummy, SP_CONTROL_REG, &data);
+       write_mos_reg(serial, port_number, MOS7720_SP_CONTROL_REG, 0x00);
+       read_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, &data);
        data = data | (port->port_number + 1);
-       write_mos_reg(serial, dummy, SP_CONTROL_REG, data);
+       write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, data);
        mos7720_port->shadowLCR = 0x83;
-       write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
-       write_mos_reg(serial, port_number, THR, 0x0c);
-       write_mos_reg(serial, port_number, IER, 0x00);
+       write_mos_reg(serial, port_number, MOS7720_LCR,
+                     mos7720_port->shadowLCR);
+       write_mos_reg(serial, port_number, MOS7720_THR, 0x0c);
+       write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
        mos7720_port->shadowLCR = 0x03;
-       write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
-       write_mos_reg(serial, port_number, IER, 0x0c);
+       write_mos_reg(serial, port_number, MOS7720_LCR,
+                     mos7720_port->shadowLCR);
+       write_mos_reg(serial, port_number, MOS7720_IER, 0x0c);
 
        response = usb_submit_urb(port->read_urb, GFP_KERNEL);
        if (response)
@@ -1144,8 +1156,8 @@ static void mos7720_close(struct usb_serial_port *port)
        usb_kill_urb(port->write_urb);
        usb_kill_urb(port->read_urb);
 
-       write_mos_reg(serial, port->port_number, MCR, 0x00);
-       write_mos_reg(serial, port->port_number, IER, 0x00);
+       write_mos_reg(serial, port->port_number, MOS7720_MCR, 0x00);
+       write_mos_reg(serial, port->port_number, MOS7720_IER, 0x00);
 
        mos7720_port->open = 0;
 }
@@ -1169,7 +1181,8 @@ static void mos7720_break(struct tty_struct *tty, int break_state)
                data = mos7720_port->shadowLCR & ~UART_LCR_SBC;
 
        mos7720_port->shadowLCR  = data;
-       write_mos_reg(serial, port->port_number, LCR, mos7720_port->shadowLCR);
+       write_mos_reg(serial, port->port_number, MOS7720_LCR,
+                     mos7720_port->shadowLCR);
 }
 
 /*
@@ -1297,7 +1310,7 @@ static void mos7720_throttle(struct tty_struct *tty)
        /* if we are implementing RTS/CTS, toggle that line */
        if (tty->termios.c_cflag & CRTSCTS) {
                mos7720_port->shadowMCR &= ~UART_MCR_RTS;
-               write_mos_reg(port->serial, port->port_number, MCR,
+               write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
                              mos7720_port->shadowMCR);
        }
 }
@@ -1327,7 +1340,7 @@ static void mos7720_unthrottle(struct tty_struct *tty)
        /* if we are implementing RTS/CTS, toggle that line */
        if (tty->termios.c_cflag & CRTSCTS) {
                mos7720_port->shadowMCR |= UART_MCR_RTS;
-               write_mos_reg(port->serial, port->port_number, MCR,
+               write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
                              mos7720_port->shadowMCR);
        }
 }
@@ -1352,35 +1365,39 @@ static int set_higher_rates(struct moschip_port *mos7720_port,
        dev_dbg(&port->dev, "Sending Setting Commands ..........\n");
        port_number = port->port_number;
 
-       write_mos_reg(serial, port_number, IER, 0x00);
-       write_mos_reg(serial, port_number, FCR, 0x00);
-       write_mos_reg(serial, port_number, FCR, 0xcf);
+       write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
+       write_mos_reg(serial, port_number, MOS7720_FCR, 0x00);
+       write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf);
        mos7720_port->shadowMCR = 0x0b;
-       write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
-       write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x00);
+       write_mos_reg(serial, port_number, MOS7720_MCR,
+                     mos7720_port->shadowMCR);
+       write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, 0x00);
 
        /***********************************************
         *              Set for higher rates           *
         ***********************************************/
        /* writing baud rate verbatim into uart clock field clearly not right */
        if (port_number == 0)
-               sp_reg = SP1_REG;
+               sp_reg = MOS7720_SP1_REG;
        else
-               sp_reg = SP2_REG;
+               sp_reg = MOS7720_SP2_REG;
        write_mos_reg(serial, dummy, sp_reg, baud * 0x10);
-       write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x03);
+       write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG, 0x03);
        mos7720_port->shadowMCR = 0x2b;
-       write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
+       write_mos_reg(serial, port_number, MOS7720_MCR,
+                     mos7720_port->shadowMCR);
 
        /***********************************************
         *              Set DLL/DLM
         ***********************************************/
        mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB;
-       write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
-       write_mos_reg(serial, port_number, DLL, 0x01);
-       write_mos_reg(serial, port_number, DLM, 0x00);
+       write_mos_reg(serial, port_number, MOS7720_LCR,
+                     mos7720_port->shadowLCR);
+       write_mos_reg(serial, port_number, MOS7720_DLL, 0x01);
+       write_mos_reg(serial, port_number, MOS7720_DLM, 0x00);
        mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB;
-       write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
+       write_mos_reg(serial, port_number, MOS7720_LCR,
+                     mos7720_port->shadowLCR);
 
        return 0;
 }
@@ -1488,15 +1505,16 @@ static int send_cmd_write_baud_rate(struct moschip_port *mos7720_port,
 
        /* Enable access to divisor latch */
        mos7720_port->shadowLCR = mos7720_port->shadowLCR | UART_LCR_DLAB;
-       write_mos_reg(serial, number, LCR, mos7720_port->shadowLCR);
+       write_mos_reg(serial, number, MOS7720_LCR, mos7720_port->shadowLCR);
 
        /* Write the divisor */
-       write_mos_reg(serial, number, DLL, (__u8)(divisor & 0xff));
-       write_mos_reg(serial, number, DLM, (__u8)((divisor & 0xff00) >> 8));
+       write_mos_reg(serial, number, MOS7720_DLL, (__u8)(divisor & 0xff));
+       write_mos_reg(serial, number, MOS7720_DLM,
+                     (__u8)((divisor & 0xff00) >> 8));
 
        /* Disable access to divisor latch */
        mos7720_port->shadowLCR = mos7720_port->shadowLCR & ~UART_LCR_DLAB;
-       write_mos_reg(serial, number, LCR, mos7720_port->shadowLCR);
+       write_mos_reg(serial, number, MOS7720_LCR, mos7720_port->shadowLCR);
 
        return status;
 }
@@ -1600,14 +1618,16 @@ static void change_port_settings(struct tty_struct *tty,
 
 
        /* Disable Interrupts */
-       write_mos_reg(serial, port_number, IER, 0x00);
-       write_mos_reg(serial, port_number, FCR, 0x00);
-       write_mos_reg(serial, port_number, FCR, 0xcf);
+       write_mos_reg(serial, port_number, MOS7720_IER, 0x00);
+       write_mos_reg(serial, port_number, MOS7720_FCR, 0x00);
+       write_mos_reg(serial, port_number, MOS7720_FCR, 0xcf);
 
        /* Send the updated LCR value to the mos7720 */
-       write_mos_reg(serial, port_number, LCR, mos7720_port->shadowLCR);
+       write_mos_reg(serial, port_number, MOS7720_LCR,
+                     mos7720_port->shadowLCR);
        mos7720_port->shadowMCR = 0x0b;
-       write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
+       write_mos_reg(serial, port_number, MOS7720_MCR,
+                     mos7720_port->shadowMCR);
 
        /* set up the MCR register and send it to the mos7720 */
        mos7720_port->shadowMCR = UART_MCR_OUT2;
@@ -1619,14 +1639,17 @@ static void change_port_settings(struct tty_struct *tty,
                /* To set hardware flow control to the specified *
                 * serial port, in SP1/2_CONTROL_REG             */
                if (port_number)
-                       write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x01);
+                       write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG,
+                                     0x01);
                else
-                       write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x02);
+                       write_mos_reg(serial, dummy, MOS7720_SP_CONTROL_REG,
+                                     0x02);
 
        } else
                mos7720_port->shadowMCR &= ~(UART_MCR_XONANY);
 
-       write_mos_reg(serial, port_number, MCR, mos7720_port->shadowMCR);
+       write_mos_reg(serial, port_number, MOS7720_MCR,
+                     mos7720_port->shadowMCR);
 
        /* Determine divisor based on baud rate */
        baud = tty_get_baud_rate(tty);
@@ -1639,7 +1662,7 @@ static void change_port_settings(struct tty_struct *tty,
        if (baud >= 230400) {
                set_higher_rates(mos7720_port, baud);
                /* Enable Interrupts */
-               write_mos_reg(serial, port_number, IER, 0x0c);
+               write_mos_reg(serial, port_number, MOS7720_IER, 0x0c);
                return;
        }
 
@@ -1650,7 +1673,7 @@ static void change_port_settings(struct tty_struct *tty,
        if (cflag & CBAUD)
                tty_encode_baud_rate(tty, baud, baud);
        /* Enable Interrupts */
-       write_mos_reg(serial, port_number, IER, 0x0c);
+       write_mos_reg(serial, port_number, MOS7720_IER, 0x0c);
 
        if (port->read_urb->status != -EINPROGRESS) {
                status = usb_submit_urb(port->read_urb, GFP_KERNEL);
@@ -1725,7 +1748,7 @@ static int get_lsr_info(struct tty_struct *tty,
 
        count = mos7720_chars_in_buffer(tty);
        if (count == 0) {
-               read_mos_reg(port->serial, port_number, LSR, &data);
+               read_mos_reg(port->serial, port_number, MOS7720_LSR, &data);
                if ((data & (UART_LSR_TEMT | UART_LSR_THRE))
                                        == (UART_LSR_TEMT | UART_LSR_THRE)) {
                        dev_dbg(&port->dev, "%s -- Empty\n", __func__);
@@ -1782,7 +1805,7 @@ static int mos7720_tiocmset(struct tty_struct *tty,
                mcr &= ~UART_MCR_LOOP;
 
        mos7720_port->shadowMCR = mcr;
-       write_mos_reg(port->serial, port->port_number, MCR,
+       write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
                      mos7720_port->shadowMCR);
 
        return 0;
@@ -1827,7 +1850,7 @@ static int set_modem_info(struct moschip_port *mos7720_port, unsigned int cmd,
        }
 
        mos7720_port->shadowMCR = mcr;
-       write_mos_reg(port->serial, port->port_number, MCR,
+       write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
                      mos7720_port->shadowMCR);
 
        return 0;
@@ -1942,7 +1965,7 @@ static int mos7720_startup(struct usb_serial *serial)
        }
 #endif
        /* LSR For Port 1 */
-       read_mos_reg(serial, 0, LSR, &data);
+       read_mos_reg(serial, 0, MOS7720_LSR, &data);
        dev_dbg(&dev->dev, "LSR:%x\n", data);
 
        return 0;
index f0c0c53359ad68d5f1bc4cc5ec67ac1e152f4b99..19b85ee98a7247c46089e023676633e70eb498df 100644 (file)
@@ -1765,6 +1765,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+       { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
        { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
        { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
        { } /* Terminating entry */
index 529066bbc7e81be1bb67e398f58425febef6a8eb..46f1f13b41f1459264e292d36087962f1a666b6f 100644 (file)
@@ -1306,6 +1306,7 @@ static void __exit usb_serial_exit(void)
        tty_unregister_driver(usb_serial_tty_driver);
        put_tty_driver(usb_serial_tty_driver);
        bus_unregister(&usb_serial_bus_type);
+       idr_destroy(&serial_minors);
 }
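
idr_destroy() releases the IDR's internally cached nodes; without this call
the memory backing the minor-number map would leak on every module unload.
The symmetric lifetime, as a sketch:

    static DEFINE_IDR(serial_minors);  /* module-lifetime minor allocator */
    /* idr_alloc() / idr_remove() per port while the module is loaded */
    idr_destroy(&serial_minors);       /* module exit: free internal nodes */
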
 
 
index caf188800c679e7f24fc329903848a8c1f64d41a..6b2479123de7762f7145c93fdcd58efb11f51093 100644 (file)
@@ -2065,6 +2065,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_READ_DISC_INFO ),
 
+/* Reported by Oliver Neukum <oneukum@suse.com>
+ * This device morphs spontaneously into another device if the access
+ * pattern of Windows isn't followed. Thus writable media would be dirty
+ * if the initial instance is used. So the device is limited to its
+ * virtual CD.
+ * And yes, the concept that BCD goes up to 9 is not heeded */
+UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff,
+               "ZTE,Incorporated",
+               "ZTE WCDMA Technologies MSM",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_SINGLE_LUN ),
+
 /* Reported by Sven Geggus <sven-usbst@geggus.net>
  * This encrypted pen drive returns bogus data for the initial READ(10).
  */
@@ -2074,6 +2086,17 @@ UNUSUAL_DEV(  0x1b1c, 0x1ab5, 0x0200, 0x0200,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_INITIAL_READ10 ),
 
+/* Reported by Hans de Goede <hdegoede@redhat.com>
+ * These are mini projectors using USB for both power and video data transport
+ * The usb-storage interface is a virtual windows driver CD, which the gm12u320
+ * driver automatically converts into framebuffer & kms dri device nodes.
+ */
+UNUSUAL_DEV( 0x1de1, 0xc102, 0x0000, 0xffff,
+               "Grain-media Technology Corp.",
+               "USB3.0 Device GM12U320",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_IGNORE_DEVICE ),
+
 /* Patch by Richard Schütz <r.schtz@t-online.de>
  * This external hard drive enclosure uses a JMicron chip which
  * needs the US_FL_IGNORE_RESIDUE flag to work properly. */
index 2fb29dfeffbd7fa70a8a881f025d73ffd50b0b25..563c510f285c47d2a7362a4da8729fdc38e1dee0 100644 (file)
@@ -689,6 +689,23 @@ struct vfio_device *vfio_device_get_from_dev(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);
 
+static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
+                                                    char *buf)
+{
+       struct vfio_device *device;
+
+       mutex_lock(&group->device_lock);
+       list_for_each_entry(device, &group->device_list, group_next) {
+               if (!strcmp(dev_name(device->dev), buf)) {
+                       vfio_device_get(device);
+                       break;
+               }
+       }
+       mutex_unlock(&group->device_lock);
+
+       return device;
+}
+
 /*
  * Caller must hold a reference to the vfio_device
  */
@@ -1198,53 +1215,53 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
 {
        struct vfio_device *device;
        struct file *filep;
-       int ret = -ENODEV;
+       int ret;
 
        if (0 == atomic_read(&group->container_users) ||
            !group->container->iommu_driver || !vfio_group_viable(group))
                return -EINVAL;
 
-       mutex_lock(&group->device_lock);
-       list_for_each_entry(device, &group->device_list, group_next) {
-               if (strcmp(dev_name(device->dev), buf))
-                       continue;
+       device = vfio_device_get_from_name(group, buf);
+       if (!device)
+               return -ENODEV;
 
-               ret = device->ops->open(device->device_data);
-               if (ret)
-                       break;
-               /*
-                * We can't use anon_inode_getfd() because we need to modify
-                * the f_mode flags directly to allow more than just ioctls
-                */
-               ret = get_unused_fd_flags(O_CLOEXEC);
-               if (ret < 0) {
-                       device->ops->release(device->device_data);
-                       break;
-               }
+       ret = device->ops->open(device->device_data);
+       if (ret) {
+               vfio_device_put(device);
+               return ret;
+       }
 
-               filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
-                                          device, O_RDWR);
-               if (IS_ERR(filep)) {
-                       put_unused_fd(ret);
-                       ret = PTR_ERR(filep);
-                       device->ops->release(device->device_data);
-                       break;
-               }
+       /*
+        * We can't use anon_inode_getfd() because we need to modify
+        * the f_mode flags directly to allow more than just ioctls
+        */
+       ret = get_unused_fd_flags(O_CLOEXEC);
+       if (ret < 0) {
+               device->ops->release(device->device_data);
+               vfio_device_put(device);
+               return ret;
+       }
 
-               /*
-                * TODO: add an anon_inode interface to do this.
-                * Appears to be missing by lack of need rather than
-                * explicitly prevented.  Now there's need.
-                */
-               filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+       filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
+                                  device, O_RDWR);
+       if (IS_ERR(filep)) {
+               put_unused_fd(ret);
+               ret = PTR_ERR(filep);
+               device->ops->release(device->device_data);
+               vfio_device_put(device);
+               return ret;
+       }
+
+       /*
+        * TODO: add an anon_inode interface to do this.
+        * Appears to be missing by lack of need rather than
+        * explicitly prevented.  Now there's need.
+        */
+       filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
 
-               vfio_device_get(device);
-               atomic_inc(&group->container_users);
+       atomic_inc(&group->container_users);
 
-               fd_install(ret, filep);
-               break;
-       }
-       mutex_unlock(&group->device_lock);
+       fd_install(ret, filep);
 
        return ret;
 }
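
The refactor above pulls the lookup out into vfio_device_get_from_name(),
which returns with a reference already taken under device_lock, so the
caller gets straight-line error handling in which each failure path drops
exactly what it owns. The ownership pattern in outline, with the patch's own
names and the bodies elided:

    device = vfio_device_get_from_name(group, buf);  /* ref taken under lock */
    if (!device)
            return -ENODEV;

    ret = device->ops->open(device->device_data);
    if (ret) {
            vfio_device_put(device);                 /* drop exactly one ref */
            return ret;
    }
    /* each later failure likewise releases the fd or file it created */
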
index 9e8e004bb1c38d809c2af43b9d42c053db3a41a2..eec2f11809ff2463d2a714224925af9c679fead1 100644 (file)
 #include <linux/file.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/kthread.h>
 #include <linux/cgroup.h>
 #include <linux/module.h>
+#include <linux/sort.h>
 
 #include "vhost.h"
 
+static ushort max_mem_regions = 64;
+module_param(max_mem_regions, ushort, 0444);
+MODULE_PARM_DESC(max_mem_regions,
+       "Maximum number of memory regions in memory map. (default: 64)");
+
 enum {
-       VHOST_MEMORY_MAX_NREGIONS = 64,
        VHOST_MEMORY_F_LOG = 0x1,
 };
 
@@ -543,7 +549,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
                fput(dev->log_file);
        dev->log_file = NULL;
        /* No one will access memory at this point */
-       kfree(dev->memory);
+       kvfree(dev->memory);
        dev->memory = NULL;
        WARN_ON(!list_empty(&dev->work_list));
        if (dev->worker) {
@@ -663,6 +669,25 @@ int vhost_vq_access_ok(struct vhost_virtqueue *vq)
 }
 EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
 
+static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2)
+{
+       const struct vhost_memory_region *r1 = p1, *r2 = p2;
+       if (r1->guest_phys_addr < r2->guest_phys_addr)
+               return 1;
+       if (r1->guest_phys_addr > r2->guest_phys_addr)
+               return -1;
+       return 0;
+}
+
+static void *vhost_kvzalloc(unsigned long size)
+{
+       void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+
+       if (!n)
+               n = vzalloc(size);
+       return n;
+}
+
 static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 {
        struct vhost_memory mem, *newmem, *oldmem;
@@ -673,21 +698,23 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
                return -EFAULT;
        if (mem.padding)
                return -EOPNOTSUPP;
-       if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
+       if (mem.nregions > max_mem_regions)
                return -E2BIG;
-       newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
+       newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions));
        if (!newmem)
                return -ENOMEM;
 
        memcpy(newmem, &mem, size);
        if (copy_from_user(newmem->regions, m->regions,
                           mem.nregions * sizeof *m->regions)) {
-               kfree(newmem);
+               kvfree(newmem);
                return -EFAULT;
        }
+       sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions),
+               vhost_memory_reg_sort_cmp, NULL);
 
        if (!memory_access_ok(d, newmem, 0)) {
-               kfree(newmem);
+               kvfree(newmem);
                return -EFAULT;
        }
        oldmem = d->memory;
@@ -699,7 +726,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
                d->vqs[i]->memory = newmem;
                mutex_unlock(&d->vqs[i]->mutex);
        }
-       kfree(oldmem);
+       kvfree(oldmem);
        return 0;
 }
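
vhost_kvzalloc() above is the usual two-tier fallback: try the contiguous slab allocator first, and fall back to vmalloc space when the region table grows past what kmalloc will satisfy. A rough userspace analogue, with calloc() and mmap() standing in for kzalloc() and vzalloc() (names illustrative, not kernel API):

/*
 * Two-tier zeroed allocation: cheap contiguous allocator first,
 * page-granular fallback for large sizes.
 */
#include <stdlib.h>
#include <sys/mman.h>

static void *kvzalloc_demo(size_t size)
{
	void *n = calloc(1, size);      /* zeroed; may fail for huge sizes */

	if (!n) {
		n = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (n == MAP_FAILED)    /* anonymous mappings are zeroed too */
			return NULL;
	}
	return n;
}

int main(void)
{
	void *p = kvzalloc_demo(1 << 20);

	return p ? 0 : 1;   /* real code must remember which free to call */
}

The free side cannot tell which allocator produced the pointer, which is why every kfree() of the memory map in this hunk becomes kvfree().
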
 
@@ -965,6 +992,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
                }
                if (eventfp != d->log_file) {
                        filep = d->log_file;
+                       d->log_file = eventfp;
                        ctx = d->log_ctx;
                        d->log_ctx = eventfp ?
                                eventfd_ctx_fileget(eventfp) : NULL;
@@ -992,17 +1020,22 @@ EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
 static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
                                                     __u64 addr, __u32 len)
 {
-       struct vhost_memory_region *reg;
-       int i;
+       const struct vhost_memory_region *reg;
+       int start = 0, end = mem->nregions;
 
-       /* linear search is not brilliant, but we really have on the order of 6
-        * regions in practice */
-       for (i = 0; i < mem->nregions; ++i) {
-               reg = mem->regions + i;
-               if (reg->guest_phys_addr <= addr &&
-                   reg->guest_phys_addr + reg->memory_size - 1 >= addr)
-                       return reg;
+       while (start < end) {
+               int slot = start + (end - start) / 2;
+               reg = mem->regions + slot;
+               if (addr >= reg->guest_phys_addr)
+                       end = slot;
+               else
+                       start = slot + 1;
        }
+
+       reg = mem->regions + start;
+       if (addr >= reg->guest_phys_addr &&
+               reg->guest_phys_addr + reg->memory_size > addr)
+               return reg;
        return NULL;
 }
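
This lookup depends on vhost_set_memory() having sorted the table by descending guest_phys_addr via vhost_memory_reg_sort_cmp(): the loop converges on the first slot whose start address is <= addr, and a final range test decides hit or miss. A self-contained demo with made-up regions (the start < n guard is a defensive extra in the sketch, not part of the hunk):

/*
 * Binary search over address ranges sorted by descending start address.
 */
#include <stdint.h>
#include <stdio.h>

struct region { uint64_t start, size; };

static const struct region regions[] = {	/* descending by .start */
	{ 0x100000, 0x10000 },
	{ 0x2000,   0x1000  },
	{ 0x0,      0x1000  },
};

static const struct region *find(const struct region *r, int n, uint64_t addr)
{
	int start = 0, end = n;

	while (start < end) {
		int slot = start + (end - start) / 2;

		if (addr >= r[slot].start)
			end = slot;		/* candidate; keep looking left */
		else
			start = slot + 1;
	}
	if (start < n && addr >= r[start].start &&
	    addr - r[start].start < r[start].size)
		return &r[start];
	return NULL;
}

int main(void)
{
	printf("%s\n", find(regions, 3, 0x2800) ? "hit" : "miss"); /* hit */
	return 0;
}
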
 
index c1b03f4235b99807d479672abdc6da0243107837..4e7fec36f5c36d55edd220469cac43c2d56c4840 100644 (file)
@@ -4,7 +4,7 @@
  * Watchdog driver for ARM SP805 watchdog module
  *
  * Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2 or later. This program is licensed "as is" without any
@@ -303,6 +303,6 @@ static struct amba_driver sp805_wdt_driver = {
 
 module_amba_driver(sp805_wdt_driver);
 
-MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
+MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
 MODULE_DESCRIPTION("ARM SP805 Watchdog Driver");
 MODULE_LICENSE("GPL");
index 862fbc206755511a6bd3a02eeaa525e992c545bc..564a7de17d99831083c46bc19fd859d40a5d51a2 100644 (file)
@@ -378,7 +378,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
 
        ret = btrfs_kobj_add_device(tgt_device->fs_devices, tgt_device);
        if (ret)
-               btrfs_error(root->fs_info, ret, "kobj add dev failed");
+               btrfs_err(root->fs_info, "kobj add dev failed %d\n", ret);
 
        printk_in_rcu(KERN_INFO
                      "BTRFS: dev_replace from %s (devid %llu) to %s started\n",
index a9aadb2ad5254cfe98d36a6eed1f12d4ae5e7925..f556c3732c2c16e22e0bcbd35f9ee1277179be5b 100644 (file)
@@ -2842,6 +2842,7 @@ int open_ctree(struct super_block *sb,
            !extent_buffer_uptodate(chunk_root->node)) {
                printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n",
                       sb->s_id);
+               chunk_root->node = NULL;
                goto fail_tree_roots;
        }
        btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
@@ -2879,7 +2880,7 @@ retry_root_backup:
            !extent_buffer_uptodate(tree_root->node)) {
                printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n",
                       sb->s_id);
-
+               tree_root->node = NULL;
                goto recovery_tree_root;
        }
 
index 1c2bd1723e40ce8d1390223daf2bf4a7871ac051..07204bf601edac25ad2757bbd7c85961eeabc56c 100644 (file)
@@ -2296,9 +2296,22 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
 static inline struct btrfs_delayed_ref_node *
 select_delayed_ref(struct btrfs_delayed_ref_head *head)
 {
+       struct btrfs_delayed_ref_node *ref;
+
        if (list_empty(&head->ref_list))
                return NULL;
 
+       /*
+        * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
+        * This prevents the ref count from dropping to zero, which would
+        * delete the extent item from the extent tree while there are still
+        * references to add; those additions would then fail because the
+        * extent item could no longer be found.
+        */
+       list_for_each_entry(ref, &head->ref_list, list) {
+               if (ref->action == BTRFS_ADD_DELAYED_REF)
+                       return ref;
+       }
+
        return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
                          list);
 }
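
The comment above is the whole policy: scan the pending list once for an ADD and only fall back to plain list order when none is queued. A plain-C rendering with a hypothetical node type:

/*
 * Prefer the first ADD in the queue, else take the head.
 */
#include <stddef.h>

enum action { DROP, ADD };

struct ref {
	enum action action;
	struct ref *next;	/* singly linked stand-in for list_head */
};

static struct ref *select_ref(struct ref *head)
{
	struct ref *r;

	if (!head)
		return NULL;
	for (r = head; r; r = r->next)	/* like list_for_each_entry() */
		if (r->action == ADD)
			return r;
	return head;			/* no ADD queued: take the head */
}

int main(void)
{
	struct ref add = { ADD, NULL };
	struct ref drop = { DROP, &add };

	return select_ref(&drop) == &add ? 0 : 1;	/* ADD wins over head */
}
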
@@ -4214,6 +4227,24 @@ out:
        space_info->chunk_alloc = 0;
        spin_unlock(&space_info->lock);
        mutex_unlock(&fs_info->chunk_mutex);
+       /*
+        * When we allocate a new chunk we reserve space in the chunk block
+        * reserve to make sure we can COW nodes/leafs in the chunk tree or
+        * add new nodes/leafs to it if we end up needing to do it when
+        * inserting the chunk item and updating device items as part of the
+        * second phase of chunk allocation, performed by
+        * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
+        * large number of new block groups to create in our transaction
+        * handle's new_bgs list, which could exhaust the chunk block reserve
+        * in extreme cases - e.g. a single transaction creating many new
+        * block groups while writing out the free space caches of all the
+        * block groups made dirty during the lifetime of the transaction.
+        */
+       if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
+               btrfs_create_pending_block_groups(trans, trans->root);
+               btrfs_trans_release_chunk_metadata(trans);
+       }
        return ret;
 }
 
index b33c0cf02668bde4d3dbeb60d0b1f411d58c648d..e33dff356460687fcade4b56202f83f318e3ebb4 100644 (file)
@@ -4209,7 +4209,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
        u64 extent_num_bytes = 0;
        u64 extent_offset = 0;
        u64 item_end = 0;
-       u64 last_size = (u64)-1;
+       u64 last_size = new_size;
        u32 found_type = (u8)-1;
        int found_extent;
        int del_item;
@@ -4493,8 +4493,7 @@ out:
                        btrfs_abort_transaction(trans, root, ret);
        }
 error:
-       if (last_size != (u64)-1 &&
-           root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
+       if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
                btrfs_ordered_update_i_size(inode, last_size, NULL);
 
        btrfs_free_path(path);
index 5d91776e12a215cddf666a2a1674a5ff5749c92e..0770c91586ca694e1f9c142fb653cf02c02dd686 100644 (file)
@@ -3090,7 +3090,7 @@ out_unlock:
 static long btrfs_ioctl_file_extent_same(struct file *file,
                        struct btrfs_ioctl_same_args __user *argp)
 {
-       struct btrfs_ioctl_same_args *same;
+       struct btrfs_ioctl_same_args *same = NULL;
        struct btrfs_ioctl_same_extent_info *info;
        struct inode *src = file_inode(file);
        u64 off;
@@ -3120,6 +3120,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
 
        if (IS_ERR(same)) {
                ret = PTR_ERR(same);
+               same = NULL;
                goto out;
        }
 
@@ -3190,6 +3191,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
 
 out:
        mnt_drop_write_file(file);
+       kfree(same);
        return ret;
 }
 
@@ -3586,6 +3588,20 @@ process_slot:
                                u64 trim = 0;
                                u64 aligned_end = 0;
 
+                               /*
+                                * Don't copy an inline extent into an offset
+                                * greater than zero. Having an inline extent
+                                * at such an offset results in chaos as btrfs
+                                * isn't prepared for such cases. Just skip
+                                * this case for the same reasons as commented
+                                * at btrfs_ioctl_clone().
+                                */
+                               if (last_dest_end > 0) {
+                                       ret = -EOPNOTSUPP;
+                                       btrfs_end_transaction(trans, root);
+                                       goto out;
+                               }
+
                                if (off > key.offset) {
                                        skip = off - key.offset;
                                        new_key.offset += skip;
index c0f18e7266b673c22357ebb033e40a4ec6e28b60..f5021fcb154e3bfa3773a79d94ee4750be48ca0d 100644 (file)
@@ -761,7 +761,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 
        if (!list_empty(&trans->ordered)) {
                spin_lock(&info->trans_lock);
-               list_splice(&trans->ordered, &cur_trans->pending_ordered);
+               list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
                spin_unlock(&info->trans_lock);
        }
 
@@ -1866,7 +1866,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
        }
 
        spin_lock(&root->fs_info->trans_lock);
-       list_splice(&trans->ordered, &cur_trans->pending_ordered);
+       list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
        if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
                spin_unlock(&root->fs_info->trans_lock);
                atomic_inc(&cur_trans->use_count);
@@ -2152,7 +2152,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
 
-       if (current != root->fs_info->transaction_kthread)
+       if (current != root->fs_info->transaction_kthread &&
+           current != root->fs_info->cleaner_kthread)
                btrfs_run_delayed_iputs(root);
 
        return ret;
index 4d6a30e76168c3f548ce48dbbb1428e0baf81d49..b863a09cd2f1e9b35d53bbf284e3eb3c3cbeb226 100644 (file)
@@ -115,7 +115,7 @@ void config_item_init_type_name(struct config_item *item,
                                const char *name,
                                struct config_item_type *type)
 {
-       config_item_set_name(item, name);
+       config_item_set_name(item, "%s", name);
        item->ci_type = type;
        config_item_init(item);
 }
@@ -124,7 +124,7 @@ EXPORT_SYMBOL(config_item_init_type_name);
 void config_group_init_type_name(struct config_group *group, const char *name,
                         struct config_item_type *type)
 {
-       config_item_set_name(&group->cg_item, name);
+       config_item_set_name(&group->cg_item, "%s", name);
        group->cg_item.ci_type = type;
        config_group_init(group);
 }
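
Both configfs hunks are the same one-line hardening: config_item_set_name() takes a printf-style format, so a caller-supplied name must be passed as the argument of a "%s" format rather than as the format itself. The userspace version of the hazard:

/*
 * Passing external data as a printf-style format lets stray '%'
 * conversions read (or, with %n, write) through garbage varargs.
 */
#include <stdio.h>

int main(void)
{
	const char *name = "eth%d-config";	/* plausible item name */

	/* printf(name); would try to consume an int for %d: undefined */
	printf("%s\n", name);			/* safe: name is data */
	return 0;
}
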
index c3e21ccfc358b2da1170c15f09a5946afad0ff16..a7f77e1fa18c25e62e8de5f809389d041e59266e 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -319,6 +319,12 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
  * @vma: The virtual memory area where the fault occurred
  * @vmf: The description of the fault
  * @get_block: The filesystem method used to translate file offsets to blocks
+ * @complete_unwritten: The filesystem method used to convert unwritten blocks
+ *     to written so the data written to them is exposed. This is required by
+ *     write faults for filesystems that will return unwritten extent mappings
+ *     from @get_block, but it is optional for reads as dax_insert_mapping()
+ *     will always zero unwritten blocks. If the fs does not support unwritten
+ *     extents, then it should pass NULL.
  *
  * When a page fault occurs, filesystems may call this helper in their
  * fault handler for DAX files. __dax_fault() assumes the caller has done all
@@ -437,8 +443,12 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
         * as for normal BH based IO completions.
         */
        error = dax_insert_mapping(inode, &bh, vma, vmf);
-       if (buffer_unwritten(&bh))
-               complete_unwritten(&bh, !error);
+       if (buffer_unwritten(&bh)) {
+               if (complete_unwritten)
+                       complete_unwritten(&bh, !error);
+               else
+                       WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
+       }
 
  out:
        if (error == -ENOMEM)
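
The code change matches the amended kernel-doc: @complete_unwritten is now optional, and when a filesystem passes NULL the fault path only needs to warn if a write fault would actually have required the conversion. The shape of that convention, with stand-in names and assert() playing the role of WARN_ON_ONCE():

/*
 * Optional-callback convention: call the hook when the filesystem
 * supplied one; otherwise a write fault on an unwritten buffer is a
 * caller bug, while reads were already zeroed.
 */
#include <assert.h>
#include <stdbool.h>

typedef void (*complete_fn)(bool ok);

static void finish_fault(bool unwritten, bool write_fault, bool error,
			 complete_fn complete_unwritten)
{
	if (!unwritten)
		return;
	if (complete_unwritten)
		complete_unwritten(!error);	/* fs converts the extent */
	else
		assert(!write_fault);		/* reads were already zeroed */
}

int main(void)
{
	finish_fault(true, false, false, NULL);	/* read fault, no hook: fine */
	return 0;
}
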
index 9bedfa8dd3a5305d1407ce04c5fa3319a6882cd3..f71e19a9dd3c18fc6ee7b3bcb4f3da9c8b8c3d4c 100644 (file)
@@ -2072,8 +2072,6 @@ static int f2fs_set_data_page_dirty(struct page *page)
                return 1;
        }
 
-       mark_inode_dirty(inode);
-
        if (!PageDirty(page)) {
                __set_page_dirty_nobuffers(page);
                update_dirty_page(inode, page);
index ada2a3dd701ad5eb22add128a0d5e89ebb98383f..b0f38c3b37f4d5577551e6d517e801a6f644ac09 100644 (file)
@@ -1331,12 +1331,13 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
        if (ret)
                return ret;
 
-       if (f2fs_is_atomic_file(inode))
+       if (f2fs_is_atomic_file(inode)) {
+               clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
                commit_inmem_pages(inode, false);
+       }
 
        ret = f2fs_sync_file(filp, 0, LONG_MAX, 0);
        mnt_drop_write_file(filp);
-       clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
        return ret;
 }
 
@@ -1387,8 +1388,8 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
        f2fs_balance_fs(F2FS_I_SB(inode));
 
        if (f2fs_is_atomic_file(inode)) {
-               commit_inmem_pages(inode, false);
                clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
+               commit_inmem_pages(inode, false);
        }
 
        if (f2fs_is_volatile_file(inode))
index e1e73617d13b6cb5e1fca434e4ab823dfa534eca..22fb5ef37966210cb50f5b8679b7c661803128f9 100644 (file)
@@ -556,27 +556,39 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
        if (!fio.encrypted_page)
                goto put_out;
 
-       f2fs_submit_page_bio(&fio);
+       err = f2fs_submit_page_bio(&fio);
+       if (err)
+               goto put_page_out;
+
+       /* write page */
+       lock_page(fio.encrypted_page);
+
+       if (unlikely(!PageUptodate(fio.encrypted_page)))
+               goto put_page_out;
+       if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi)))
+               goto put_page_out;
+
+       set_page_dirty(fio.encrypted_page);
+       f2fs_wait_on_page_writeback(fio.encrypted_page, META);
+       if (clear_page_dirty_for_io(fio.encrypted_page))
+               dec_page_count(fio.sbi, F2FS_DIRTY_META);
+
+       set_page_writeback(fio.encrypted_page);
 
        /* allocate block address */
        f2fs_wait_on_page_writeback(dn.node_page, NODE);
-
        allocate_data_block(fio.sbi, NULL, fio.blk_addr,
                                        &fio.blk_addr, &sum, CURSEG_COLD_DATA);
-       dn.data_blkaddr = fio.blk_addr;
-
-       /* write page */
-       lock_page(fio.encrypted_page);
-       set_page_writeback(fio.encrypted_page);
        fio.rw = WRITE_SYNC;
        f2fs_submit_page_mbio(&fio);
 
+       dn.data_blkaddr = fio.blk_addr;
        set_data_blkaddr(&dn);
        f2fs_update_extent_cache(&dn);
        set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
        if (page->index == 0)
                set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
-
+put_page_out:
        f2fs_put_page(fio.encrypted_page, 1);
 put_out:
        f2fs_put_dnode(&dn);
@@ -605,8 +617,8 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
                        .page = page,
                        .encrypted_page = NULL,
                };
+               set_page_dirty(page);
                f2fs_wait_on_page_writeback(page, DATA);
-
                if (clear_page_dirty_for_io(page))
                        inode_dec_dirty_pages(inode);
                set_cold_data(page);
index 38e75fb1e48812b38d477a13a53f0469fb95bbd3..a13ffcc329923b85f6dee0064e56681bf8a1269c 100644 (file)
@@ -141,6 +141,8 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
        kunmap_atomic(dst_addr);
        SetPageUptodate(page);
 no_update:
+       set_page_dirty(page);
+
        /* clear dirty state */
        dirty = clear_page_dirty_for_io(page);
 
index 1eb343768781f3c4c89e0f6ecc2694d9a4c61c75..61b97f9cb9f657961d7ce12a490d55ece03e18a4 100644 (file)
@@ -257,6 +257,7 @@ void commit_inmem_pages(struct inode *inode, bool abort)
                if (!abort) {
                        lock_page(cur->page);
                        if (cur->page->mapping == inode->i_mapping) {
+                               set_page_dirty(cur->page);
                                f2fs_wait_on_page_writeback(cur->page, DATA);
                                if (clear_page_dirty_for_io(cur->page))
                                        inode_dec_dirty_pages(inode);
index f0520bcf209442914eff0ed60d380fb3d2c66402..518c6294bf6c0ef965e9f56baa4e11d2eccc0165 100644 (file)
@@ -702,6 +702,7 @@ void wbc_account_io(struct writeback_control *wbc, struct page *page,
        else
                wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
 }
+EXPORT_SYMBOL_GPL(wbc_account_io);
 
 /**
  * inode_congested - test whether an inode is congested
index e98d39d75cf41a3f63c889958f87804dbb6aacf2..b9dc23cd04f20626269a32b820881c8bdc1a8a82 100644 (file)
@@ -76,7 +76,7 @@ static int jfs_open(struct inode *inode, struct file *file)
                if (ji->active_ag == -1) {
                        struct jfs_sb_info *jfs_sb = JFS_SBI(inode->i_sb);
                        ji->active_ag = BLKTOAG(addressPXD(&ji->ixpxd), jfs_sb);
-                       atomic_inc( &jfs_sb->bmap->db_active[ji->active_ag]);
+                       atomic_inc(&jfs_sb->bmap->db_active[ji->active_ag]);
                }
                spin_unlock_irq(&ji->ag_lock);
        }
index 6f1cb2b5ee285dd50622f719296fe71284d5f826..41aa3ca6a6a4995104d12f38cc5945ebb63a0f7d 100644 (file)
@@ -134,11 +134,11 @@ int jfs_write_inode(struct inode *inode, struct writeback_control *wbc)
         * It has been committed since the last change, but was still
         * on the dirty inode list.
         */
-        if (!test_cflag(COMMIT_Dirty, inode)) {
+       if (!test_cflag(COMMIT_Dirty, inode)) {
                /* Make sure committed changes hit the disk */
                jfs_flush_journal(JFS_SBI(inode->i_sb)->log, wait);
                return 0;
-        }
+       }
 
        if (jfs_commit_inode(inode, wait)) {
                jfs_err("jfs_write_inode: jfs_commit_inode failed!");
index e33be921aa41b5ae56a4054605aaeea5b8065987..a5ac97b9a933afcc8e91673b599f149e92184f16 100644 (file)
@@ -1160,7 +1160,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                rc = dtModify(tid, new_dir, &new_dname, &ino,
                              old_ip->i_ino, JFS_RENAME);
                if (rc)
-                       goto out4;
+                       goto out_tx;
                drop_nlink(new_ip);
                if (S_ISDIR(new_ip->i_mode)) {
                        drop_nlink(new_ip);
@@ -1185,7 +1185,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        if ((new_size = commitZeroLink(tid, new_ip)) < 0) {
                                txAbort(tid, 1);        /* Marks FS Dirty */
                                rc = new_size;
-                               goto out4;
+                               goto out_tx;
                        }
                        tblk = tid_to_tblock(tid);
                        tblk->xflag |= COMMIT_DELETE;
@@ -1203,7 +1203,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                if (rc) {
                        jfs_err("jfs_rename didn't expect dtSearch to fail "
                                "w/rc = %d", rc);
-                       goto out4;
+                       goto out_tx;
                }
 
                ino = old_ip->i_ino;
@@ -1211,7 +1211,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                if (rc) {
                        if (rc == -EIO)
                                jfs_err("jfs_rename: dtInsert returned -EIO");
-                       goto out4;
+                       goto out_tx;
                }
                if (S_ISDIR(old_ip->i_mode))
                        inc_nlink(new_dir);
@@ -1226,7 +1226,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                jfs_err("jfs_rename did not expect dtDelete to return rc = %d",
                        rc);
                txAbort(tid, 1);        /* Marks Filesystem dirty */
-               goto out4;
+               goto out_tx;
        }
        if (S_ISDIR(old_ip->i_mode)) {
                drop_nlink(old_dir);
@@ -1285,7 +1285,7 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        rc = txCommit(tid, ipcount, iplist, commit_flag);
 
-      out4:
+      out_tx:
        txEnd(tid);
        if (new_ip)
                mutex_unlock(&JFS_IP(new_ip)->commit_mutex);
@@ -1308,13 +1308,6 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        }
        if (new_ip && (new_ip->i_nlink == 0))
                set_cflag(COMMIT_Nolink, new_ip);
-      out3:
-       free_UCSname(&new_dname);
-      out2:
-       free_UCSname(&old_dname);
-      out1:
-       if (new_ip && !S_ISDIR(new_ip->i_mode))
-               IWRITE_UNLOCK(new_ip);
        /*
         * Truncating the directory index table is not guaranteed.  It
         * may need to be done iteratively
@@ -1325,7 +1318,13 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 
                clear_cflag(COMMIT_Stale, old_dir);
        }
-
+       if (new_ip && !S_ISDIR(new_ip->i_mode))
+               IWRITE_UNLOCK(new_ip);
+      out3:
+       free_UCSname(&new_dname);
+      out2:
+       free_UCSname(&old_dname);
+      out1:
        jfs_info("jfs_rename: returning %d", rc);
        return rc;
 }
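
Besides renaming out4 to out_tx, the hunk reorders the exit path so the directory-index truncation and the IWRITE_UNLOCK of new_ip run before the temporary name buffers are freed, keeping each label responsible only for state set up before the corresponding jump. The generic label-per-resource shape, with stand-in resources:

/*
 * Label-per-resource unwind: success falls through the same labels,
 * each label releasing only what was acquired before it.
 */
#include <stdio.h>
#include <stdlib.h>

static int do_work(FILE *tx) { return tx ? 0 : -1; }	/* trivial stub */

static int rename_demo(void)
{
	int rc = 0;
	char *old_name = NULL, *new_name = NULL;
	FILE *tx = NULL;

	old_name = malloc(16);
	if (!old_name) { rc = -1; goto out1; }
	new_name = malloc(16);
	if (!new_name) { rc = -1; goto out2; }
	tx = tmpfile();				/* like txBegin() */
	if (!tx) { rc = -1; goto out3; }

	rc = do_work(tx);
	goto out_tx;		/* success and failure share the exit */

out_tx:
	fclose(tx);		/* like txEnd() at out_tx */
out3:
	free(new_name);
out2:
	free(old_name);
out1:
	return rc;
}

int main(void) { return rename_demo(); }
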
index 653faabb07f46b70f4d414519ae6093e2d52e5cf..d3d558ba4da7966de9699aeb98b2630bd7c6854e 100644 (file)
@@ -862,12 +862,11 @@ static int posix_locks_deadlock(struct file_lock *caller_fl,
  * whether or not a lock was successfully freed by testing the return
  * value for -ENOENT.
  */
-static int flock_lock_file(struct file *filp, struct file_lock *request)
+static int flock_lock_inode(struct inode *inode, struct file_lock *request)
 {
        struct file_lock *new_fl = NULL;
        struct file_lock *fl;
        struct file_lock_context *ctx;
-       struct inode *inode = file_inode(filp);
        int error = 0;
        bool found = false;
        LIST_HEAD(dispose);
@@ -890,7 +889,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
                goto find_conflict;
 
        list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
-               if (filp != fl->fl_file)
+               if (request->fl_file != fl->fl_file)
                        continue;
                if (request->fl_type == fl->fl_type)
                        goto out;
@@ -1164,20 +1163,19 @@ int posix_lock_file(struct file *filp, struct file_lock *fl,
 EXPORT_SYMBOL(posix_lock_file);
 
 /**
- * posix_lock_file_wait - Apply a POSIX-style lock to a file
- * @filp: The file to apply the lock to
+ * posix_lock_inode_wait - Apply a POSIX-style lock to a file
+ * @inode: inode of file to which lock request should be applied
  * @fl: The lock to be applied
  *
- * Add a POSIX style lock to a file.
- * We merge adjacent & overlapping locks whenever possible.
- * POSIX locks are sorted by owner task, then by starting address
+ * Variant of posix_lock_file_wait that does not take a filp, and so can be
+ * used after the filp has already been torn down.
  */
-int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
 {
        int error;
        might_sleep ();
        for (;;) {
-               error = posix_lock_file(filp, fl, NULL);
+               error = __posix_lock_file(inode, fl, NULL);
                if (error != FILE_LOCK_DEFERRED)
                        break;
                error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
@@ -1189,7 +1187,7 @@ int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
        }
        return error;
 }
-EXPORT_SYMBOL(posix_lock_file_wait);
+EXPORT_SYMBOL(posix_lock_inode_wait);
 
 /**
  * locks_mandatory_locked - Check for an active lock
@@ -1851,18 +1849,18 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
 }
 
 /**
- * flock_lock_file_wait - Apply a FLOCK-style lock to a file
- * @filp: The file to apply the lock to
+ * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
+ * @inode: inode of the file to apply to
  * @fl: The lock to be applied
  *
- * Add a FLOCK style lock to a file.
+ * Apply a FLOCK style lock request to an inode.
  */
-int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
+int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
 {
        int error;
        might_sleep();
        for (;;) {
-               error = flock_lock_file(filp, fl);
+               error = flock_lock_inode(inode, fl);
                if (error != FILE_LOCK_DEFERRED)
                        break;
                error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
@@ -1874,8 +1872,7 @@ int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
        }
        return error;
 }
-
-EXPORT_SYMBOL(flock_lock_file_wait);
+EXPORT_SYMBOL(flock_lock_inode_wait);
 
 /**
  *     sys_flock: - flock() system call.
@@ -2401,7 +2398,8 @@ locks_remove_flock(struct file *filp)
                .fl_type = F_UNLCK,
                .fl_end = OFFSET_MAX,
        };
-       struct file_lock_context *flctx = file_inode(filp)->i_flctx;
+       struct inode *inode = file_inode(filp);
+       struct file_lock_context *flctx = inode->i_flctx;
 
        if (list_empty(&flctx->flc_flock))
                return;
@@ -2409,7 +2407,7 @@ locks_remove_flock(struct file *filp)
        if (filp->f_op->flock)
                filp->f_op->flock(filp, F_SETLKW, &fl);
        else
-               flock_lock_file(filp, &fl);
+               flock_lock_inode(inode, &fl);
 
        if (fl.fl_ops && fl.fl_ops->fl_release_private)
                fl.fl_ops->fl_release_private(&fl);
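
Both renamed *_wait helpers share one skeleton: retry the non-blocking attempt until it stops returning FILE_LOCK_DEFERRED, sleep interruptibly in between, and back out of the wait queue if a signal arrives first. A compilable reduction with trivial stubs (dequeue_blocker() stands in for the kernel's cleanup of the blocked waiter):

/*
 * Retry-until-not-deferred loop, as in posix_lock_inode_wait() and
 * flock_lock_inode_wait().  Stub return values are illustrative.
 */
#define FILE_LOCK_DEFERRED	1

static int try_lock(void)		{ return 0; }	/* 0: granted */
static int wait_interruptible(void)	{ return 0; }	/* 0: woken */
static void dequeue_blocker(void)	{ }

static int lock_wait(void)
{
	int error;

	for (;;) {
		error = try_lock();
		if (error != FILE_LOCK_DEFERRED)
			break;			/* granted or hard error */
		error = wait_interruptible();
		if (!error)
			continue;		/* retry the attempt */
		dequeue_blocker();		/* signal: stop waiting */
		break;
	}
	return error;
}

int main(void) { return lock_wait(); }
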
index c7cb8a526c05fbaa5ba18934cd04eda1eb2a6e60..2b8aa15fd6dfa755368ddf21136eaab5192f1e28 100644 (file)
@@ -1361,6 +1361,36 @@ enum umount_tree_flags {
        UMOUNT_PROPAGATE = 2,
        UMOUNT_CONNECTED = 4,
 };
+
+static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
+{
+       /* Leaving mounts connected is only valid for lazy umounts */
+       if (how & UMOUNT_SYNC)
+               return true;
+
+       /* A mount without a parent has nothing to be connected to */
+       if (!mnt_has_parent(mnt))
+               return true;
+
+       /* Because the reference counting rules change when mounts are
+        * unmounted and connected, unmounted mounts may not be
+        * connected to mounted mounts.
+        */
+       if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
+               return true;
+
+       /* Has it been requested that the mount remain connected? */
+       if (how & UMOUNT_CONNECTED)
+               return false;
+
+       /* Is the mount locked such that it needs to remain connected? */
+       if (IS_MNT_LOCKED(mnt))
+               return false;
+
+       /* By default disconnect the mount */
+       return true;
+}
+
 /*
  * mount_lock must be held
  * namespace_sem must be held for write
@@ -1398,10 +1428,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
                if (how & UMOUNT_SYNC)
                        p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
 
-               disconnect = !(((how & UMOUNT_CONNECTED) &&
-                               mnt_has_parent(p) &&
-                               (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) ||
-                              IS_MNT_LOCKED_AND_LAZY(p));
+               disconnect = disconnect_mount(p, how);
 
                pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
                                 disconnect ? &unmounted : NULL);
@@ -1538,11 +1565,8 @@ void __detach_mounts(struct dentry *dentry)
        while (!hlist_empty(&mp->m_list)) {
                mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
                if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
-                       struct mount *p, *tmp;
-                       list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts,  mnt_child) {
-                               hlist_add_head(&p->mnt_umount.s_list, &unmounted);
-                               umount_mnt(p);
-                       }
+                       hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
+                       umount_mnt(mnt);
                }
                else umount_tree(mnt, UMOUNT_CONNECTED);
        }
index ecebb406cc1aec554ce780f2c3680eddc351e8da..4a90c9bb31357305ed6f38166bb0e9afabaae953 100644 (file)
@@ -775,7 +775,7 @@ static int nfs_init_server(struct nfs_server *server,
        server->options = data->options;
        server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
                NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
-               NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME|NFS_CAP_CHANGE_ATTR;
+               NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME;
 
        if (data->rsize)
                server->rsize = nfs_block_size(data->rsize, NULL);
index c12951b9551eab8b0394ed2aa218cf15ce6f2c39..b3289d701eea21623f4081fee1b2807e3e2f4b3a 100644 (file)
@@ -1852,7 +1852,7 @@ ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args,
        struct nfs42_layoutstat_devinfo *devinfo;
        int i;
 
-       for (i = 0; i <= FF_LAYOUT_MIRROR_COUNT(pls); i++) {
+       for (i = 0; i < FF_LAYOUT_MIRROR_COUNT(pls); i++) {
                if (*dev_count >= dev_limit)
                        break;
                mirror = FF_LAYOUT_COMP(pls, i);
index b77b328a06d74f0124d2a65b51fac0fc21fbd692..0adc7d245b3dd838e32371920e23d9dda5071ee0 100644 (file)
@@ -442,8 +442,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
                        nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
                if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
                        inode->i_version = fattr->change_attr;
-               else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR))
-                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
+               else
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
+                               | NFS_INO_REVAL_PAGECACHE);
                if (fattr->valid & NFS_ATTR_FATTR_SIZE)
                        inode->i_size = nfs_size_to_loff_t(fattr->size);
                else
@@ -1244,9 +1245,11 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
        if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
                cur_size = i_size_read(inode);
                new_isize = nfs_size_to_loff_t(fattr->size);
-               if (cur_size != new_isize && nfsi->nrequests == 0)
+               if (cur_size != new_isize)
                        invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
        }
+       if (nfsi->nrequests != 0)
+               invalid &= ~NFS_INO_REVAL_PAGECACHE;
 
        /* Have any file permissions changed? */
        if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
@@ -1684,13 +1687,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                        invalid |= NFS_INO_INVALID_ATTR
                                | NFS_INO_INVALID_DATA
                                | NFS_INO_INVALID_ACCESS
-                               | NFS_INO_INVALID_ACL
-                               | NFS_INO_REVAL_PAGECACHE;
+                               | NFS_INO_INVALID_ACL;
                        if (S_ISDIR(inode->i_mode))
                                nfs_force_lookup_revalidate(inode);
                        inode->i_version = fattr->change_attr;
                }
-       } else if (server->caps & NFS_CAP_CHANGE_ATTR)
+       } else
                nfsi->cache_validity |= save_cache_validity;
 
        if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
@@ -1717,7 +1719,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                        if ((nfsi->nrequests == 0) || new_isize > cur_isize) {
                                i_size_write(inode, new_isize);
                                invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
-                               invalid &= ~NFS_INO_REVAL_PAGECACHE;
                        }
                        dprintk("NFS: isize change on server for file %s/%ld "
                                        "(%Ld to %Ld)\n",
index 7e3c4604bea8a6e6e92906b6c2096adb6df5ca3f..9b372b845f6a6ff06a4a035e2f6799d7d29cd8f7 100644 (file)
@@ -296,6 +296,22 @@ extern struct rpc_procinfo nfs4_procedures[];
 
 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
 extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags);
+static inline struct nfs4_label *
+nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src)
+{
+       if (!dst || !src)
+               return NULL;
+
+       if (src->len > NFS4_MAXLABELLEN)
+               return NULL;
+
+       dst->lfs = src->lfs;
+       dst->pi = src->pi;
+       dst->len = src->len;
+       memcpy(dst->label, src->label, src->len);
+
+       return dst;
+}
 static inline void nfs4_label_free(struct nfs4_label *label)
 {
        if (label) {
@@ -316,6 +332,11 @@ static inline void nfs4_label_free(void *label) {}
 static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
 {
 }
+static inline struct nfs4_label *
+nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src)
+{
+       return NULL;
+}
 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
 
 /* proc.c */
index f486b80f927ab7204159852a9740900a6c73aec6..d731bbf974aaf1d4bf695c2cb57a99cc586e0258 100644 (file)
@@ -135,7 +135,7 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
        return err;
 }
 
-loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
+static loff_t _nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
 {
        struct inode *inode = file_inode(filep);
        struct nfs42_seek_args args = {
@@ -171,6 +171,23 @@ loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
        return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
 }
 
+loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
+{
+       struct nfs_server *server = NFS_SERVER(file_inode(filep));
+       struct nfs4_exception exception = { };
+       int err;
+
+       do {
+               err = _nfs42_proc_llseek(filep, offset, whence);
+               if (err == -ENOTSUPP)
+                       return -EOPNOTSUPP;
+               err = nfs4_handle_exception(server, err, &exception);
+       } while (exception.retry);
+
+       return err;
+}
+
+
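
The new wrapper does two jobs: it reruns _nfs42_proc_llseek() through the standard nfs4_handle_exception() retry loop, and it rewrites ENOTSUPP, a kernel-internal errno (524) with no userspace definition, into the user-visible EOPNOTSUPP before the result can escape through the syscall. The control flow reduced to stubs (values illustrative):

/*
 * Retry loop with errno translation: the kernel-internal code must
 * never leak to userspace.
 */
#include <errno.h>

#define ENOTSUPP	524	/* kernel-internal, not in <errno.h> */

struct exception { int retry; };

static int do_op(void)		{ return -ENOTSUPP; }	/* stub server reply */
static int handle_exception(int err, struct exception *e)
{
	e->retry = 0;		/* stub: never ask for a retry */
	return err;
}

static int op_wrapper(void)
{
	struct exception exception = { 0 };
	int err;

	do {
		err = do_op();
		if (err == -ENOTSUPP)
			return -EOPNOTSUPP;	/* user-visible translation */
		err = handle_exception(err, &exception);
	} while (exception.retry);

	return err;
}

int main(void) { return op_wrapper() == -EOPNOTSUPP ? 0 : 1; }
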
 static void
 nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
 {
index 6f228b5af819ea576240c40869c1da74d823e460..3acb1eb72930c40828bab90aeb27a3918f71138d 100644 (file)
@@ -467,7 +467,10 @@ static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
 
 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
 {
-       do_renew_lease(server->nfs_client, timestamp);
+       struct nfs_client *clp = server->nfs_client;
+
+       if (!nfs4_has_session(clp))
+               do_renew_lease(clp, timestamp);
 }
 
 struct nfs4_call_sync_data {
@@ -616,8 +619,7 @@ int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
                clp = session->clp;
                do_renew_lease(clp, res->sr_timestamp);
                /* Check sequence flags */
-               if (res->sr_status_flags != 0)
-                       nfs4_schedule_lease_recovery(clp);
+               nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
                nfs41_update_target_slotid(slot->table, slot, res);
                break;
        case 1:
@@ -910,6 +912,7 @@ struct nfs4_opendata {
        struct nfs_open_confirmres c_res;
        struct nfs4_string owner_name;
        struct nfs4_string group_name;
+       struct nfs4_label *a_label;
        struct nfs_fattr f_attr;
        struct nfs4_label *f_label;
        struct dentry *dir;
@@ -1013,6 +1016,10 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
        if (IS_ERR(p->f_label))
                goto err_free_p;
 
+       p->a_label = nfs4_label_alloc(server, gfp_mask);
+       if (IS_ERR(p->a_label))
+               goto err_free_f;
+
        alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
        p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
        if (IS_ERR(p->o_arg.seqid))
@@ -1041,7 +1048,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
        p->o_arg.server = server;
        p->o_arg.bitmask = nfs4_bitmask(server, label);
        p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
-       p->o_arg.label = label;
+       p->o_arg.label = nfs4_label_copy(p->a_label, label);
        p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
        switch (p->o_arg.claim) {
        case NFS4_OPEN_CLAIM_NULL:
@@ -1074,6 +1081,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
        return p;
 
 err_free_label:
+       nfs4_label_free(p->a_label);
+err_free_f:
        nfs4_label_free(p->f_label);
 err_free_p:
        kfree(p);
@@ -1093,6 +1102,7 @@ static void nfs4_opendata_free(struct kref *kref)
                nfs4_put_open_state(p->state);
        nfs4_put_state_owner(p->owner);
 
+       nfs4_label_free(p->a_label);
        nfs4_label_free(p->f_label);
 
        dput(p->dir);
@@ -1198,12 +1208,15 @@ static bool nfs_need_update_open_stateid(struct nfs4_state *state,
 
 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
 {
+       if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
+               return;
        if (state->n_wronly)
                set_bit(NFS_O_WRONLY_STATE, &state->flags);
        if (state->n_rdonly)
                set_bit(NFS_O_RDONLY_STATE, &state->flags);
        if (state->n_rdwr)
                set_bit(NFS_O_RDWR_STATE, &state->flags);
+       set_bit(NFS_OPEN_STATE, &state->flags);
 }
 
 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
@@ -5439,15 +5452,15 @@ static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *
        return err;
 }
 
-static int do_vfs_lock(struct file *file, struct file_lock *fl)
+static int do_vfs_lock(struct inode *inode, struct file_lock *fl)
 {
        int res = 0;
        switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
                case FL_POSIX:
-                       res = posix_lock_file_wait(file, fl);
+                       res = posix_lock_inode_wait(inode, fl);
                        break;
                case FL_FLOCK:
-                       res = flock_lock_file_wait(file, fl);
+                       res = flock_lock_inode_wait(inode, fl);
                        break;
                default:
                        BUG();
@@ -5484,7 +5497,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
        atomic_inc(&lsp->ls_count);
        /* Ensure we don't close file until we're done freeing locks! */
        p->ctx = get_nfs_open_context(ctx);
-       get_file(fl->fl_file);
        memcpy(&p->fl, fl, sizeof(p->fl));
        p->server = NFS_SERVER(inode);
        return p;
@@ -5496,7 +5508,6 @@ static void nfs4_locku_release_calldata(void *data)
        nfs_free_seqid(calldata->arg.seqid);
        nfs4_put_lock_state(calldata->lsp);
        put_nfs_open_context(calldata->ctx);
-       fput(calldata->fl.fl_file);
        kfree(calldata);
 }
 
@@ -5509,7 +5520,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
        switch (task->tk_status) {
                case 0:
                        renew_lease(calldata->server, calldata->timestamp);
-                       do_vfs_lock(calldata->fl.fl_file, &calldata->fl);
+                       do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl);
                        if (nfs4_update_lock_stateid(calldata->lsp,
                                        &calldata->res.stateid))
                                break;
@@ -5617,7 +5628,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
        mutex_lock(&sp->so_delegreturn_mutex);
        /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
        down_read(&nfsi->rwsem);
-       if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
+       if (do_vfs_lock(inode, request) == -ENOENT) {
                up_read(&nfsi->rwsem);
                mutex_unlock(&sp->so_delegreturn_mutex);
                goto out;
@@ -5758,7 +5769,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
                                data->timestamp);
                if (data->arg.new_lock) {
                        data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
-                       if (do_vfs_lock(data->fl.fl_file, &data->fl) < 0) {
+                       if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) {
                                rpc_restart_call_prepare(task);
                                break;
                        }
@@ -6000,7 +6011,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
        if (status != 0)
                goto out;
        request->fl_flags |= FL_ACCESS;
-       status = do_vfs_lock(request->fl_file, request);
+       status = do_vfs_lock(state->inode, request);
        if (status < 0)
                goto out;
        down_read(&nfsi->rwsem);
@@ -6008,7 +6019,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
                /* Yes: cache locks! */
                /* ...but avoid races with delegation recall... */
                request->fl_flags = fl_flags & ~FL_SLEEP;
-               status = do_vfs_lock(request->fl_file, request);
+               status = do_vfs_lock(state->inode, request);
                up_read(&nfsi->rwsem);
                goto out;
        }
@@ -7573,13 +7584,8 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
                goto out;
        }
        ret = rpc_wait_for_completion_task(task);
-       if (!ret) {
-               struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
-
-               if (task->tk_status == 0)
-                       nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
+       if (!ret)
                ret = task->tk_status;
-       }
        rpc_put_task(task);
 out:
        dprintk("<-- %s status=%d\n", __func__, ret);
@@ -7967,16 +7973,17 @@ static void nfs4_layoutreturn_release(void *calldata)
 {
        struct nfs4_layoutreturn *lrp = calldata;
        struct pnfs_layout_hdr *lo = lrp->args.layout;
+       LIST_HEAD(freeme);
 
        dprintk("--> %s\n", __func__);
        spin_lock(&lo->plh_inode->i_lock);
        if (lrp->res.lrs_present)
                pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
+       pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range);
        pnfs_clear_layoutreturn_waitbit(lo);
-       clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
-       rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
        lo->plh_block_lgets--;
        spin_unlock(&lo->plh_inode->i_lock);
+       pnfs_free_lseg_list(&freeme);
        pnfs_put_layout_hdr(lrp->args.layout);
        nfs_iput_and_deactive(lrp->inode);
        kfree(calldata);
@@ -8590,7 +8597,6 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
        .minor_version = 0,
        .init_caps = NFS_CAP_READDIRPLUS
                | NFS_CAP_ATOMIC_OPEN
-               | NFS_CAP_CHANGE_ATTR
                | NFS_CAP_POSIX_LOCK,
        .init_client = nfs40_init_client,
        .shutdown_client = nfs40_shutdown_client,
@@ -8616,7 +8622,6 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
        .minor_version = 1,
        .init_caps = NFS_CAP_READDIRPLUS
                | NFS_CAP_ATOMIC_OPEN
-               | NFS_CAP_CHANGE_ATTR
                | NFS_CAP_POSIX_LOCK
                | NFS_CAP_STATEID_NFSV41
                | NFS_CAP_ATOMIC_OPEN_V1,
@@ -8639,7 +8644,6 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
        .minor_version = 2,
        .init_caps = NFS_CAP_READDIRPLUS
                | NFS_CAP_ATOMIC_OPEN
-               | NFS_CAP_CHANGE_ATTR
                | NFS_CAP_POSIX_LOCK
                | NFS_CAP_STATEID_NFSV41
                | NFS_CAP_ATOMIC_OPEN_V1
index 605840dc89cf9e28c173659af201aab109f9328d..f2e2ad8944617f679a4a85934f6a276d3665229d 100644 (file)
@@ -2191,25 +2191,35 @@ static void nfs41_handle_server_reboot(struct nfs_client *clp)
        }
 }
 
-static void nfs41_handle_state_revoked(struct nfs_client *clp)
+static void nfs41_handle_all_state_revoked(struct nfs_client *clp)
 {
        nfs4_reset_all_state(clp);
        dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
 }
 
+static void nfs41_handle_some_state_revoked(struct nfs_client *clp)
+{
+       nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
+       nfs4_schedule_state_manager(clp);
+
+       dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
+}
+
 static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
 {
-       /* This will need to handle layouts too */
-       nfs_expire_all_delegations(clp);
+       /* FIXME: For now, we destroy all layouts. */
+       pnfs_destroy_all_layouts(clp);
+       /* FIXME: For now, we test all delegations+open state+locks. */
+       nfs41_handle_some_state_revoked(clp);
        dprintk("%s: Recallable state revoked on server %s!\n", __func__,
                        clp->cl_hostname);
 }
 
 static void nfs41_handle_backchannel_fault(struct nfs_client *clp)
 {
-       nfs_expire_all_delegations(clp);
-       if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
-               nfs4_schedule_state_manager(clp);
+       set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+       nfs4_schedule_state_manager(clp);
+
        dprintk("%s: server %s declared a backchannel fault\n", __func__,
                        clp->cl_hostname);
 }
@@ -2231,10 +2241,11 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
 
        if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
                nfs41_handle_server_reboot(clp);
-       if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
-                           SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
+       if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED))
+               nfs41_handle_all_state_revoked(clp);
+       if (flags & (SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
                            SEQ4_STATUS_ADMIN_STATE_REVOKED))
-               nfs41_handle_state_revoked(clp);
+               nfs41_handle_some_state_revoked(clp);
        if (flags & SEQ4_STATUS_LEASE_MOVED)
                nfs4_schedule_lease_moved_recovery(clp);
        if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
index 1da68d3b1edabdb78c60527f502af5f40d6cf69b..4984bbe55ff1eed1623df2196bc0de0de41a4304 100644 (file)
@@ -1100,8 +1100,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
                mirror->pg_base = 0;
                mirror->pg_recoalesce = 0;
 
-               desc->pg_moreio = 0;
-
                while (!list_empty(&head)) {
                        struct nfs_page *req;
 
@@ -1109,8 +1107,11 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
                        nfs_list_remove_request(req);
                        if (__nfs_pageio_add_request(desc, req))
                                continue;
-                       if (desc->pg_error < 0)
+                       if (desc->pg_error < 0) {
+                               list_splice_tail(&head, &mirror->pg_list);
+                               mirror->pg_recoalesce = 1;
                                return 0;
+                       }
                        break;
                }
        } while (mirror->pg_recoalesce);
index 0ba9a02c95664960f8c0f46ea97249bd8653fe16..70bf706b10904e156affe9dd4bea2ec9a17776c5 100644 (file)
@@ -352,7 +352,7 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
 {
        struct pnfs_layout_segment *s;
 
-       if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
+       if (!test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
                return false;
 
        list_for_each_entry(s, &lo->plh_segs, pls_list)
@@ -362,6 +362,18 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
        return true;
 }
 
+static bool
+pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
+{
+       if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
+               return false;
+       lo->plh_return_iomode = 0;
+       lo->plh_block_lgets++;
+       pnfs_get_layout_hdr(lo);
+       clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
+       return true;
+}
+
 static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
                struct pnfs_layout_hdr *lo, struct inode *inode)
 {
@@ -372,17 +384,16 @@ static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
        if (pnfs_layout_need_return(lo, lseg)) {
                nfs4_stateid stateid;
                enum pnfs_iomode iomode;
+               bool send;
 
                stateid = lo->plh_stateid;
                iomode = lo->plh_return_iomode;
-               /* decreased in pnfs_send_layoutreturn() */
-               lo->plh_block_lgets++;
-               lo->plh_return_iomode = 0;
+               send = pnfs_prepare_layoutreturn(lo);
                spin_unlock(&inode->i_lock);
-               pnfs_get_layout_hdr(lo);
-
-               /* Send an async layoutreturn so we dont deadlock */
-               pnfs_send_layoutreturn(lo, stateid, iomode, false);
+               if (send) {
+                       /* Send an async layoutreturn so we dont deadlock */
+                       pnfs_send_layoutreturn(lo, stateid, iomode, false);
+               }
        } else
                spin_unlock(&inode->i_lock);
 }
@@ -411,6 +422,10 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg)
                pnfs_layoutreturn_before_put_lseg(lseg, lo, inode);
 
        if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
+               if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
+                       spin_unlock(&inode->i_lock);
+                       return;
+               }
                pnfs_get_layout_hdr(lo);
                pnfs_layout_remove_lseg(lo, lseg);
                spin_unlock(&inode->i_lock);
@@ -451,6 +466,8 @@ pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
                test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
        if (atomic_dec_and_test(&lseg->pls_refcount)) {
                struct pnfs_layout_hdr *lo = lseg->pls_layout;
+               if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
+                       return;
                pnfs_get_layout_hdr(lo);
                pnfs_layout_remove_lseg(lo, lseg);
                pnfs_free_lseg_async(lseg);
@@ -924,6 +941,7 @@ void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
        clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
        smp_mb__after_atomic();
        wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
+       rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
 }
 
 static int
@@ -978,6 +996,7 @@ _pnfs_return_layout(struct inode *ino)
        LIST_HEAD(tmp_list);
        nfs4_stateid stateid;
        int status = 0, empty;
+       bool send;
 
        dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);
 
@@ -1007,17 +1026,18 @@ _pnfs_return_layout(struct inode *ino)
        /* Don't send a LAYOUTRETURN if list was initially empty */
        if (empty) {
                spin_unlock(&ino->i_lock);
-               pnfs_put_layout_hdr(lo);
                dprintk("NFS: %s no layout segments to return\n", __func__);
-               goto out;
+               goto out_put_layout_hdr;
        }
 
        set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
-       lo->plh_block_lgets++;
+       send = pnfs_prepare_layoutreturn(lo);
        spin_unlock(&ino->i_lock);
        pnfs_free_lseg_list(&tmp_list);
-
-       status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
+       if (send)
+               status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
+out_put_layout_hdr:
+       pnfs_put_layout_hdr(lo);
 out:
        dprintk("<-- %s status: %d\n", __func__, status);
        return status;
@@ -1097,13 +1117,9 @@ bool pnfs_roc(struct inode *ino)
 out_noroc:
        if (lo) {
                stateid = lo->plh_stateid;
-               layoutreturn =
-                       test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
-                                          &lo->plh_flags);
-               if (layoutreturn) {
-                       lo->plh_block_lgets++;
-                       pnfs_get_layout_hdr(lo);
-               }
+               if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+                                          &lo->plh_flags))
+                       layoutreturn = pnfs_prepare_layoutreturn(lo);
        }
        spin_unlock(&ino->i_lock);
        if (layoutreturn) {
@@ -1146,15 +1162,18 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
        struct pnfs_layout_segment *lseg;
        nfs4_stateid stateid;
        u32 current_seqid;
-       bool found = false, layoutreturn = false;
+       bool layoutreturn = false;
 
        spin_lock(&ino->i_lock);
-       list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
-               if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
-                       rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
-                       found = true;
-                       goto out;
-               }
+       list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) {
+               if (!test_bit(NFS_LSEG_ROC, &lseg->pls_flags))
+                       continue;
+               if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
+                       continue;
+               rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
+               spin_unlock(&ino->i_lock);
+               return true;
+       }
        lo = nfsi->layout;
        current_seqid = be32_to_cpu(lo->plh_stateid.seqid);
 
@@ -1162,23 +1181,19 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
         * a barrier, we choose the worst-case barrier.
         */
        *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
-out:
-       if (!found) {
-               stateid = lo->plh_stateid;
-               layoutreturn =
-                       test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
-                                          &lo->plh_flags);
-               if (layoutreturn) {
-                       lo->plh_block_lgets++;
-                       pnfs_get_layout_hdr(lo);
-               }
-       }
+       stateid = lo->plh_stateid;
+       if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+                                          &lo->plh_flags))
+               layoutreturn = pnfs_prepare_layoutreturn(lo);
+       if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
+               rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
+
        spin_unlock(&ino->i_lock);
        if (layoutreturn) {
-               rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
                pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false);
+               return true;
        }
-       return found;
+       return false;
 }
 
 /*
@@ -1695,7 +1710,6 @@ void pnfs_error_mark_layout_for_return(struct inode *inode,
        spin_lock(&inode->i_lock);
        /* set failure bit so that pnfs path will be retried later */
        pnfs_layout_set_fail_bit(lo, iomode);
-       set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
        if (lo->plh_return_iomode == 0)
                lo->plh_return_iomode = range.iomode;
        else if (lo->plh_return_iomode != range.iomode)
@@ -2207,13 +2221,12 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
        if (ld->prepare_layoutcommit) {
                status = ld->prepare_layoutcommit(&data->args);
                if (status) {
+                       put_rpccred(data->cred);
                        spin_lock(&inode->i_lock);
                        set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
                        if (end_pos > nfsi->layout->plh_lwb)
                                nfsi->layout->plh_lwb = end_pos;
-                       spin_unlock(&inode->i_lock);
-                       put_rpccred(data->cred);
-                       goto clear_layoutcommitting;
+                       goto out_unlock;
                }
        }
 
index 65869ca9c851dbf4f0b289ca84865a018c2b6e57..75a35a1afa7944d4ac54bd94994cddf1fd05ab54 100644 (file)
@@ -1379,24 +1379,27 @@ static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
 {
        struct nfs_pgio_args *argp = &hdr->args;
        struct nfs_pgio_res *resp = &hdr->res;
+       u64 size = argp->offset + resp->count;
 
        if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
+               fattr->size = size;
+       if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
+               fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
                return;
-       if (argp->offset + resp->count != fattr->size)
-               return;
-       if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode))
+       }
+       if (size != fattr->size)
                return;
        /* Set attribute barrier */
        nfs_fattr_set_barrier(fattr);
+       /* ...and update size */
+       fattr->valid |= NFS_ATTR_FATTR_SIZE;
 }
 
 void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
 {
-       struct nfs_fattr *fattr = hdr->res.fattr;
+       struct nfs_fattr *fattr = &hdr->fattr;
        struct inode *inode = hdr->inode;
 
-       if (fattr == NULL)
-               return;
        spin_lock(&inode->i_lock);
        nfs_writeback_check_extend(hdr, fattr);
        nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
index 7114ce6e6b9ef038f415550b925ac50a95be978c..0fcdbe7ca6480e7950092cfd0dc7d0ce431ef548 100644 (file)
@@ -20,8 +20,6 @@
 #define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
 #define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
 #define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED)
-#define IS_MNT_LOCKED_AND_LAZY(m) \
-       (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED)
 
 #define CL_EXPIRE              0x01
 #define CL_SLAVE               0x02
index d751fcb637bb73d756b7da9c4b0aa337d53647b2..1ade1206bb896f50ad326d281c42ec56e66d8372 100644 (file)
@@ -75,3 +75,9 @@ config PROC_PAGE_MONITOR
 config PROC_CHILDREN
        bool "Include /proc/<pid>/task/<tid>/children file"
        default n
+       help
+         Provides a fast way to retrieve the first-level child PIDs of a task.
+         See <file:Documentation/filesystems/proc.txt> for more information.
+
+         Say Y if you run user-space software that benefits from this
+         interface; rkt is one example.
index 87782e874b6af4523adff3e7e721046d5f857ee0..aa50d1ac28fc6189a9489d1b679fcf86115e633c 100644 (file)
@@ -243,6 +243,11 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
        len1 = arg_end - arg_start;
        len2 = env_end - env_start;
 
+       /* Empty ARGV. */
+       if (len1 == 0) {
+               rv = 0;
+               goto out_free_page;
+       }
        /*
         * Inherently racy -- command line shares address space
         * with code and data.
index 91a4e6426321885eaa226081be2c39ad35a95f74..92e6726f6e3732573bd9a64f3b313cc3508ce519 100644 (file)
@@ -92,7 +92,7 @@ static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
                             roundup(sizeof(CORE_STR), 4)) +
                        roundup(sizeof(struct elf_prstatus), 4) +
                        roundup(sizeof(struct elf_prpsinfo), 4) +
-                       roundup(sizeof(struct task_struct), 4);
+                       roundup(arch_task_struct_size, 4);
        *elf_buflen = PAGE_ALIGN(*elf_buflen);
        return size + *elf_buflen;
 }
@@ -415,7 +415,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
        /* set up the task structure */
        notes[2].name   = CORE_STR;
        notes[2].type   = NT_TASKSTRUCT;
-       notes[2].datasz = sizeof(struct task_struct);
+       notes[2].datasz = arch_task_struct_size;
        notes[2].data   = current;
 
        nhdr->p_filesz  += notesize(&notes[2]);
index 6afac3d561ac81f6f5861f86a0dc4eb2a7d1fca2..8d0b3ade0ff0ef8a65f8714330a3bfd49201c0d8 100644 (file)
@@ -1652,17 +1652,9 @@ static int udf_update_inode(struct inode *inode, int do_sync)
                       iinfo->i_ext.i_data, inode->i_sb->s_blocksize -
                                        sizeof(struct unallocSpaceEntry));
                use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
-               use->descTag.tagLocation =
-                               cpu_to_le32(iinfo->i_location.logicalBlockNum);
-               crclen = sizeof(struct unallocSpaceEntry) +
-                               iinfo->i_lenAlloc - sizeof(struct tag);
-               use->descTag.descCRCLength = cpu_to_le16(crclen);
-               use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use +
-                                                          sizeof(struct tag),
-                                                          crclen));
-               use->descTag.tagChecksum = udf_tag_checksum(&use->descTag);
+               crclen = sizeof(struct unallocSpaceEntry);
 
-               goto out;
+               goto finish;
        }
 
        if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
@@ -1782,6 +1774,8 @@ static int udf_update_inode(struct inode *inode, int do_sync)
                efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
                crclen = sizeof(struct extendedFileEntry);
        }
+
+finish:
        if (iinfo->i_strat4096) {
                fe->icbTag.strategyType = cpu_to_le16(4096);
                fe->icbTag.strategyParameter = cpu_to_le16(1);
@@ -1791,7 +1785,9 @@ static int udf_update_inode(struct inode *inode, int do_sync)
                fe->icbTag.numEntries = cpu_to_le16(1);
        }
 
-       if (S_ISDIR(inode->i_mode))
+       if (iinfo->i_use)
+               fe->icbTag.fileType = ICBTAG_FILE_TYPE_USE;
+       else if (S_ISDIR(inode->i_mode))
                fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
        else if (S_ISREG(inode->i_mode))
                fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
@@ -1828,7 +1824,6 @@ static int udf_update_inode(struct inode *inode, int do_sync)
                                                  crclen));
        fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
 
-out:
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
 
index 20de88d1bf86205d126d5e27b43298af9b836a79..dd714037c322d0009d8df40e73bf5adf3aa892d4 100644 (file)
@@ -159,11 +159,10 @@ xfs_attr3_rmt_write_verify(
        struct xfs_buf  *bp)
 {
        struct xfs_mount *mp = bp->b_target->bt_mount;
-       struct xfs_buf_log_item *bip = bp->b_fspriv;
+       int             blksize = mp->m_attr_geo->blksize;
        char            *ptr;
        int             len;
        xfs_daddr_t     bno;
-       int             blksize = mp->m_attr_geo->blksize;
 
        /* no verification of non-crc buffers */
        if (!xfs_sb_version_hascrc(&mp->m_sb))
@@ -175,16 +174,22 @@ xfs_attr3_rmt_write_verify(
        ASSERT(len >= blksize);
 
        while (len > 0) {
+               struct xfs_attr3_rmt_hdr *rmt = (struct xfs_attr3_rmt_hdr *)ptr;
+
                if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) {
                        xfs_buf_ioerror(bp, -EFSCORRUPTED);
                        xfs_verifier_error(bp);
                        return;
                }
-               if (bip) {
-                       struct xfs_attr3_rmt_hdr *rmt;
 
-                       rmt = (struct xfs_attr3_rmt_hdr *)ptr;
-                       rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn);
+               /*
+                * Ensure we aren't writing bogus LSNs to disk. See
+                * xfs_attr3_rmt_hdr_set() for the explanation.
+                */
+               if (rmt->rm_lsn != cpu_to_be64(NULLCOMMITLSN)) {
+                       xfs_buf_ioerror(bp, -EFSCORRUPTED);
+                       xfs_verifier_error(bp);
+                       return;
                }
                xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF);
 
@@ -221,6 +226,18 @@ xfs_attr3_rmt_hdr_set(
        rmt->rm_owner = cpu_to_be64(ino);
        rmt->rm_blkno = cpu_to_be64(bno);
 
+       /*
+        * Remote attribute blocks are written synchronously, so we don't
+        * have an LSN that we can stamp in them that makes any sense to log
+        * recovery. To ensure that log recovery handles overwrites of these
+        * blocks sanely (i.e. once they've been freed and reallocated as some
+        * other type of metadata) we need to ensure that the LSN has a value
+        * that tells log recovery to ignore the LSN and overwrite the buffer
+        * with whatever is in it's log. To do this, we use the magic
+        * NULLCOMMITLSN to indicate that the LSN is invalid.
+        */
+       rmt->rm_lsn = cpu_to_be64(NULLCOMMITLSN);
+
        return sizeof(struct xfs_attr3_rmt_hdr);
 }
 
@@ -434,14 +451,21 @@ xfs_attr_rmtval_set(
 
                /*
                 * Allocate a single extent, up to the size of the value.
+                *
+                * Note that we have to consider this a data allocation as we
+                * write the remote attribute without logging the contents.
+                * Hence we must ensure that we aren't using blocks that are on
+                * the busy list so that we don't overwrite blocks which have
+                * recently been freed but their transactions are not yet
+                * committed to disk. If we overwrite the contents of a busy
+                * extent and then crash then the block may not contain the
+                * correct metadata after log recovery occurs.
                 */
                xfs_bmap_init(args->flist, args->firstblock);
                nmap = 1;
                error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno,
-                                 blkcnt,
-                                 XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
-                                 args->firstblock, args->total, &map, &nmap,
-                                 args->flist);
+                                 blkcnt, XFS_BMAPI_ATTRFORK, args->firstblock,
+                                 args->total, &map, &nmap, args->flist);
                if (!error) {
                        error = xfs_bmap_finish(&args->trans, args->flist,
                                                &committed);
index f0e8249722d40a0dcaf9f31bc25effaba142b0a9..db4acc1c3e73479cdf32322944a7fa1bcf34b96f 100644 (file)
@@ -1514,18 +1514,27 @@ xfs_filemap_fault(
        struct vm_area_struct   *vma,
        struct vm_fault         *vmf)
 {
-       struct xfs_inode        *ip = XFS_I(file_inode(vma->vm_file));
+       struct inode            *inode = file_inode(vma->vm_file);
        int                     ret;
 
-       trace_xfs_filemap_fault(ip);
+       trace_xfs_filemap_fault(XFS_I(inode));
 
        /* DAX can shortcut the normal fault path on write faults! */
-       if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(VFS_I(ip)))
+       if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
                return xfs_filemap_page_mkwrite(vma, vmf);
 
-       xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
-       ret = filemap_fault(vma, vmf);
-       xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
+       xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+       if (IS_DAX(inode)) {
+               /*
+                * we do not want to trigger unwritten extent conversion on read
+                * faults - that is unnecessary overhead and would also require
+                * changes to xfs_get_blocks_direct() to map unwritten extent
+                * ioend for conversion on read-only mappings.
+                */
+               ret = __dax_fault(vma, vmf, xfs_get_blocks_direct, NULL);
+       } else
+               ret = filemap_fault(vma, vmf);
+       xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
        return ret;
 }
index 01dd228ca05e315b88feb7afbfaf3e81d728878b..480ebba8464f38dbb0608b579185ad93a0600913 100644 (file)
@@ -1886,9 +1886,14 @@ xlog_recover_get_buf_lsn(
                uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
                break;
        case XFS_ATTR3_RMT_MAGIC:
-               lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
-               uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid;
-               break;
+               /*
+                * Remote attr blocks are written synchronously, rather than
+                * being logged. That means they do not contain a valid
+                * (i.e. transactionally ordered) LSN, and hence any time we
+                * see a buffer to replay over the top of a remote attribute
+                * block we should simply do so.
+                */
+               goto recover_immediately;
        case XFS_SB_MAGIC:
                lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
                uuid = &((struct xfs_dsb *)blk)->sb_uuid;
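Read together with the write-verifier change above: remote attr blocks are stamped with NULLCOMMITLSN on write, and recovery treats that value as "no ordering information". A simplified sketch of the ordering check applied to xlog_recover_get_buf_lsn()'s result (the exact caller logic is an assumption; replay_buffer_from_log() is a placeholder name):

        xfs_lsn_t lsn = xlog_recover_get_buf_lsn(mp, bp);

        /* NULLCOMMITLSN never compares as "newer than the log item",
         * so stamped remote attr blocks are always overwritten. */
        if (lsn == NULLCOMMITLSN || XFS_LSN_CMP(lsn, current_lsn) < 0)
                replay_buffer_from_log(bp);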
diff --git a/include/asm-generic/mm-arch-hooks.h b/include/asm-generic/mm-arch-hooks.h
new file mode 100644 (file)
index 0000000..5ff0e51
--- /dev/null
@@ -0,0 +1,16 @@
+/*
+ * Architecture specific mm hooks
+ */
+
+#ifndef _ASM_GENERIC_MM_ARCH_HOOKS_H
+#define _ASM_GENERIC_MM_ARCH_HOOKS_H
+
+/*
+ * This file should be included through arch/../include/asm/Kbuild for
+ * the architecture which doesn't need specific mm hooks.
+ *
+ * In that case, the generic hooks defined in include/linux/mm-arch-hooks.h
+ * are used.
+ */
+
+#endif /* _ASM_GENERIC_MM_ARCH_HOOKS_H */
index c7df89f99115c82bdd8f5accc7199bd6034d5e6f..58fe9e8b6fd7f305f46beeda817a66f0fb8ffccb 100644 (file)
@@ -2,7 +2,7 @@
  * ARM PrimeXsys System Controller SP810 header file
  *
  * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index fed36418dd1c2fe8c1c084278080f2ff23169725..6c78956aa47092440edb3a73e7b9389ac3a57558 100644 (file)
@@ -45,6 +45,7 @@ enum {
        ATA_SECT_SIZE           = 512,
        ATA_MAX_SECTORS_128     = 128,
        ATA_MAX_SECTORS         = 256,
+       ATA_MAX_SECTORS_1024    = 1024,
        ATA_MAX_SECTORS_LBA48   = 65535,/* TODO: 65536? */
        ATA_MAX_SECTORS_TAPE    = 65535,
 
index 58cfab80dd707ff28d8b4e12fdf735bc24f7f60b..1b62d768c7df5ea3053d09ed3878d1d7a16837b2 100644 (file)
@@ -47,6 +47,7 @@ struct blkcg {
 
        struct blkcg_policy_data        *pd[BLKCG_MAX_POLS];
 
+       struct list_head                all_blkcgs_node;
 #ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head                cgwb_list;
 #endif
@@ -88,18 +89,12 @@ struct blkg_policy_data {
  * Policies that need to keep per-blkcg data which is independent
  * from any request_queue associated to it must specify its size
  * with the cpd_size field of the blkcg_policy structure and
- * embed a blkcg_policy_data in it. blkcg core allocates
- * policy-specific per-blkcg structures lazily the first time
- * they are actually needed, so it handles them together with
- * blkgs. cpd_init() is invoked to let each policy handle
- * per-blkcg data.
+ * embed a blkcg_policy_data in it.  cpd_init() is invoked to let
+ * each policy handle per-blkcg data.
  */
 struct blkcg_policy_data {
        /* the policy id this per-policy data belongs to */
        int                             plid;
-
-       /* used during policy activation */
-       struct list_head                alloc_node;
 };
 
 /* association between a blk cgroup and a request queue */
index 4383476a0d4814beb6c03d32feb52e8ce94a0014..139d6d2e123fb0c69bbe31705b08b637c020ac8d 100644 (file)
@@ -192,5 +192,7 @@ extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
 extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
 extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
 extern const struct bpf_func_proto bpf_get_current_comm_proto;
+extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
+extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
 
 #endif /* _LINUX_BPF_H */
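The two new externs expose helper prototypes for the vlan push/pop eBPF helpers. A sketch of the shape such a proto definition takes in net/core/filter.c, mirroring the existing protos there (the field values are an assumption):

const struct bpf_func_proto bpf_skb_vlan_push_proto = {
        .func           = bpf_skb_vlan_push,    /* helper body, not shown */
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,       /* struct sk_buff *skb */
        .arg2_type      = ARG_ANYTHING,         /* vlan protocol */
        .arg3_type      = ARG_ANYTHING,         /* vlan tci */
};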
index a240b18e86fa4e6cfcc1aaea2b00d4d899c44bdf..08bffcc466de7e5bce4359b3174ba8105e734a35 100644 (file)
@@ -33,18 +33,19 @@ struct clk_lookup {
        }
 
 struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
-       const char *dev_fmt, ...);
+       const char *dev_fmt, ...) __printf(3, 4);
 
 void clkdev_add(struct clk_lookup *cl);
 void clkdev_drop(struct clk_lookup *cl);
 
 struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id,
-       const char *dev_fmt, ...);
+       const char *dev_fmt, ...) __printf(3, 4);
 
 void clkdev_add_table(struct clk_lookup *, size_t);
 int clk_add_alias(const char *, const char *, const char *, struct device *);
 
-int clk_register_clkdev(struct clk *, const char *, const char *, ...);
+int clk_register_clkdev(struct clk *, const char *, const char *, ...)
+       __printf(3, 4);
 int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t);
 
 #ifdef CONFIG_COMMON_CLK
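This and the following hunks add __printf() annotations across the tree. The macro expands to GCC's format attribute, so format-string mismatches become compile-time warnings. A minimal illustration (demo_log() is hypothetical):

/* #define __printf(a, b) __attribute__((format(printf, a, b)))
 * 'a' is the 1-based position of the format string, 'b' that of the
 * first variadic argument (0 for va_list variants like kvasprintf). */

extern __printf(2, 3)
int demo_log(int level, const char *fmt, ...);

void demo(void)
{
        demo_log(1, "%s", "ok");        /* fine */
        demo_log(1, "%d", "oops");      /* -Wformat: '%d' expects int */
}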
index ab25814690bc649421cc16092653ae5217406198..a76c9172b2eb08de11a33331c35c7533ed041967 100644 (file)
@@ -424,7 +424,7 @@ asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
 
 asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
 
-extern int compat_printk(const char *fmt, ...);
+extern __printf(1, 2) int compat_printk(const char *fmt, ...);
 extern void sigset_from_compat(sigset_t *set, const compat_sigset_t *compat);
 extern void sigset_to_compat(compat_sigset_t *compat, const sigset_t *set);
 
index c9e5c57e4edf2c09ccc4c09c72c54a87c1ecbd9c..63a36e89d0eb6b3f718d7058b723291662c186d3 100644 (file)
@@ -64,7 +64,8 @@ struct config_item {
        struct dentry           *ci_dentry;
 };
 
-extern int config_item_set_name(struct config_item *, const char *, ...);
+extern __printf(2, 3)
+int config_item_set_name(struct config_item *, const char *, ...);
 
 static inline char *config_item_name(struct config_item * item)
 {
index 76abba4b238ece14f8a2bd9bcce5ab53306c61fd..dcacb1a72e26d0755703135016105ae511a2b042 100644 (file)
@@ -340,7 +340,27 @@ struct cper_ia_proc_ctx {
        __u64   mm_reg_addr;
 };
 
-/* Memory Error Section */
+/* Old Memory Error Section UEFI 2.1, 2.2 */
+struct cper_sec_mem_err_old {
+       __u64   validation_bits;
+       __u64   error_status;
+       __u64   physical_addr;
+       __u64   physical_addr_mask;
+       __u16   node;
+       __u16   card;
+       __u16   module;
+       __u16   bank;
+       __u16   device;
+       __u16   row;
+       __u16   column;
+       __u16   bit_pos;
+       __u64   requestor_id;
+       __u64   responder_id;
+       __u64   target_id;
+       __u8    error_type;
+};
+
+/* Memory Error Section UEFI >= 2.3 */
 struct cper_sec_mem_err {
        __u64   validation_bits;
        __u64   error_status;
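Firmware predating UEFI 2.3 emits the shorter record, so a consumer has to pick the layout by section length. A hedged sketch (demo_print_mem_err() is hypothetical; the assumption is that the newer section strictly appends fields):

static void demo_print_mem_err(const void *sec, size_t len)
{
        if (len == sizeof(struct cper_sec_mem_err_old)) {
                const struct cper_sec_mem_err_old *old = sec;

                pr_info("node %u bank %u\n", old->node, old->bank);
        } else if (len >= sizeof(struct cper_sec_mem_err)) {
                const struct cper_sec_mem_err *mem = sec;

                pr_info("node %u bank %u\n", mem->node, mem->bank);
        }
}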
index c0fb6b1b4712c30bdf9f7749fefead8a5a2be622..23c30bdcca8631f80242d62d4b167d20e2814c75 100644 (file)
@@ -40,9 +40,10 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr);
 extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
 extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
 
-extern struct device *cpu_device_create(struct device *parent, void *drvdata,
-                                       const struct attribute_group **groups,
-                                       const char *fmt, ...);
+extern __printf(4, 5)
+struct device *cpu_device_create(struct device *parent, void *drvdata,
+                                const struct attribute_group **groups,
+                                const char *fmt, ...);
 #ifdef CONFIG_HOTPLUG_CPU
 extern void unregister_cpu(struct cpu *cpu);
 extern ssize_t arch_cpu_probe(const char *, size_t);
index 29ad97c34fd5cf4bcdeec373d2ad5691bb2ec6c3..bde1e567b3a93ad5feb7a0d2aa980a07e28270d5 100644 (file)
@@ -62,6 +62,7 @@ struct cpufreq_policy {
        /* CPUs sharing clock, require sw coordination */
        cpumask_var_t           cpus;   /* Online CPUs only */
        cpumask_var_t           related_cpus; /* Online + Offline CPUs */
+       cpumask_var_t           real_cpus; /* Related and present */
 
        unsigned int            shared_type; /* ACPI: ANY or ALL affected CPUs
                                                should set cpufreq */
index d2d50249b7b2a16bd08b3c6f82ea3748836c7afc..d67ae119cf4eb9d3143d5019108f1d7fbf252b88 100644 (file)
@@ -327,7 +327,8 @@ static inline unsigned d_count(const struct dentry *dentry)
 /*
  * helper function for dentry_operations.d_dname() members
  */
-extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+extern __printf(4, 5)
+char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
 extern char *simple_dname(struct dentry *, char *, int);
 
 extern char *__d_path(const struct path *, const struct path *, char *, int);
index 5a31bf3a40243e1faff078fdc509361568143f01..a2b4ea70a9467520d93a556190ca8995183e4af9 100644 (file)
@@ -637,8 +637,9 @@ extern int devres_release_group(struct device *dev, void *id);
 
 /* managed devm_k.alloc/kfree for device drivers */
 extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
-extern char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
-                            va_list ap);
+extern __printf(3, 0)
+char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
+                     va_list ap);
 extern __printf(3, 4)
 char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...);
 static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
@@ -1011,12 +1012,10 @@ extern int __must_check device_reprobe(struct device *dev);
 /*
  * Easy functions for dynamically creating devices on the fly
  */
-extern struct device *device_create_vargs(struct class *cls,
-                                         struct device *parent,
-                                         dev_t devt,
-                                         void *drvdata,
-                                         const char *fmt,
-                                         va_list vargs);
+extern __printf(5, 0)
+struct device *device_create_vargs(struct class *cls, struct device *parent,
+                                  dev_t devt, void *drvdata,
+                                  const char *fmt, va_list vargs);
 extern __printf(5, 6)
 struct device *device_create(struct class *cls, struct device *parent,
                             dev_t devt, void *drvdata,
index 17724f6ea983c9c5ac8fecb2069ec1d90b451168..fa2cab985e577681c801f8861c299ad938ec9ffc 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/linkage.h>
 #include <linux/printk.h>
 #include <linux/workqueue.h>
+#include <linux/sched.h>
 
 #include <asm/cacheflush.h>
 
@@ -354,6 +355,16 @@ static inline unsigned int bpf_prog_size(unsigned int proglen)
                   offsetof(struct bpf_prog, insns[proglen]));
 }
 
+static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
+{
+       /* When classic BPF programs have been loaded and the arch
+        * does not have a classic BPF JIT (anymore), they have been
+        * converted via bpf_migrate_filter() to eBPF and thus always
+        * have an unspec program type.
+        */
+       return prog->type == BPF_PROG_TYPE_UNSPEC;
+}
+
 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
 
 #ifdef CONFIG_DEBUG_SET_MODULE_RONX
@@ -411,6 +422,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
 
 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 void bpf_int_jit_compile(struct bpf_prog *fp);
+bool bpf_helper_changes_skb_data(void *func);
 
 #ifdef CONFIG_BPF_JIT
 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
@@ -427,8 +439,9 @@ void bpf_jit_free(struct bpf_prog *fp);
 static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
                                u32 pass, void *image)
 {
-       pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
-              flen, proglen, pass, image);
+       pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
+              proglen, pass, image, current->comm, task_pid_nr(current));
+
        if (image)
                print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
                               16, 1, image, proglen, false);
index a0653e560c2679a2eea870035a55cd3282e47894..cc008c338f5a9bcb66076da96e929d6104697373 100644 (file)
@@ -1046,12 +1046,12 @@ extern void locks_remove_file(struct file *);
 extern void locks_release_private(struct file_lock *);
 extern void posix_test_lock(struct file *, struct file_lock *);
 extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
-extern int posix_lock_file_wait(struct file *, struct file_lock *);
+extern int posix_lock_inode_wait(struct inode *, struct file_lock *);
 extern int posix_unblock_lock(struct file_lock *);
 extern int vfs_test_lock(struct file *, struct file_lock *);
 extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
 extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
-extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
+extern int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl);
 extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
 extern void lease_get_mtime(struct inode *, struct timespec *time);
 extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
@@ -1137,7 +1137,8 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
        return -ENOLCK;
 }
 
-static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+static inline int posix_lock_inode_wait(struct inode *inode,
+                                       struct file_lock *fl)
 {
        return -ENOLCK;
 }
@@ -1163,8 +1164,8 @@ static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
        return 0;
 }
 
-static inline int flock_lock_file_wait(struct file *filp,
-                                      struct file_lock *request)
+static inline int flock_lock_inode_wait(struct inode *inode,
+                                       struct file_lock *request)
 {
        return -ENOLCK;
 }
@@ -1202,6 +1203,20 @@ static inline void show_fd_locks(struct seq_file *f,
                        struct file *filp, struct files_struct *files) {}
 #endif /* !CONFIG_FILE_LOCKING */
 
+static inline struct inode *file_inode(const struct file *f)
+{
+       return f->f_inode;
+}
+
+static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+{
+       return posix_lock_inode_wait(file_inode(filp), fl);
+}
+
+static inline int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
+{
+       return flock_lock_inode_wait(file_inode(filp), fl);
+}
 
 struct fasync_struct {
        spinlock_t              fa_lock;
@@ -2011,11 +2026,6 @@ extern void ihold(struct inode * inode);
 extern void iput(struct inode *);
 extern int generic_update_time(struct inode *, struct timespec *, int);
 
-static inline struct inode *file_inode(const struct file *f)
-{
-       return f->f_inode;
-}
-
 /* /sys/fs */
 extern struct kobject *fs_kobj;
 
index 1da602982cf93a6a262bef85967945ffea9c586a..6cd8c0ee4b6f89a9ab93b67515cb6e214e108071 100644 (file)
@@ -116,6 +116,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
  *            SAVE_REGS. If another ops with this flag set is already registered
  *            for any of the functions that this ops will be registered for, then
  *            this ops will fail to register or set_filter_ip.
+ * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
  */
 enum {
        FTRACE_OPS_FL_ENABLED                   = 1 << 0,
@@ -132,6 +133,7 @@ enum {
        FTRACE_OPS_FL_MODIFYING                 = 1 << 11,
        FTRACE_OPS_FL_ALLOC_TRAMP               = 1 << 12,
        FTRACE_OPS_FL_IPMODIFY                  = 1 << 13,
+       FTRACE_OPS_FL_PID                       = 1 << 14,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -159,6 +161,7 @@ struct ftrace_ops {
        struct ftrace_ops               *next;
        unsigned long                   flags;
        void                            *private;
+       ftrace_func_t                   saved_func;
        int __percpu                    *disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
        int                             nr_trampolines;
index cc7ec129b329efb9887c0482c5abeb83562b79f7..c8393cd4d44f2d871bdf962c3dad55505e362b32 100644 (file)
@@ -45,7 +45,7 @@ struct seq_file;
  * @base: identifies the first GPIO number handled by this chip;
  *     or, if negative during registration, requests dynamic ID allocation.
  *     DEPRECATION: providing anything non-negative and nailing the base
- *     base offset of GPIO chips is deprecated. Please pass -1 as base to
+ *     offset of GPIO chips is deprecated. Please pass -1 as base to
  *     let gpiolib select the chip base in all possible cases. We want to
  *     get rid of the static GPIO number space in the long run.
  * @ngpio: the number of GPIOs handled by this controller; the last GPIO
index 0042bf330b99ffa6edd77677529753bdd00b79d4..c02b5ce6c5cdb787c3cb5ddb1ca7652dc5fe9d1e 100644 (file)
@@ -230,6 +230,7 @@ struct hid_sensor_common {
        struct platform_device *pdev;
        unsigned usage_id;
        atomic_t data_ready;
+       atomic_t user_requested_state;
        struct iio_trigger *trigger;
        struct hid_sensor_hub_attribute_info poll;
        struct hid_sensor_hub_attribute_info report_state;
index 205026175c4208a8e65423f6042606a5be67bec7..d891f949466ae2b11d2fd063a4493044e5dc9618 100644 (file)
@@ -460,15 +460,14 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
        return &mm->page_table_lock;
 }
 
-static inline bool hugepages_supported(void)
-{
-       /*
-        * Some platform decide whether they support huge pages at boot
-        * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
-        * there is no such support
-        */
-       return HPAGE_SHIFT != 0;
-}
+#ifndef hugepages_supported
+/*
+ * Some platforms decide whether they support huge pages at boot
+ * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
+ * when there is no such support
+ */
+#define hugepages_supported() (HPAGE_SHIFT != 0)
+#endif
 
 #else  /* CONFIG_HUGETLB_PAGE */
 struct hstate {};
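Turning the inline into an #ifndef-guarded macro lets an architecture substitute its own test when HPAGE_SHIFT alone is not meaningful. A sketch of what an arch override could look like (the exact condition is an assumption, modelled on s390-style machine facilities):

/* in an arch's asm/hugetlb.h, before linux/hugetlb.h is included: */
#define hugepages_supported() (MACHINE_HAS_HPAGE)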
index 7c68c36d3fd88788f043447c5bed889cdd408bb6..b449f378f995ae647077f521d9f7af3af9480a70 100644 (file)
@@ -282,68 +282,8 @@ void __init parse_early_param(void);
 void __init parse_early_options(char *cmdline);
 #endif /* __ASSEMBLY__ */
 
-/**
- * module_init() - driver initialization entry point
- * @x: function to be run at kernel boot time or module insertion
- * 
- * module_init() will either be called during do_initcalls() (if
- * builtin) or at module insertion time (if a module).  There can only
- * be one per module.
- */
-#define module_init(x) __initcall(x);
-
-/**
- * module_exit() - driver exit entry point
- * @x: function to be run when driver is removed
- * 
- * module_exit() will wrap the driver clean-up code
- * with cleanup_module() when used with rmmod when
- * the driver is a module.  If the driver is statically
- * compiled into the kernel, module_exit() has no effect.
- * There can only be one per module.
- */
-#define module_exit(x) __exitcall(x);
-
 #else /* MODULE */
 
-/*
- * In most cases loadable modules do not need custom
- * initcall levels. There are still some valid cases where
- * a driver may be needed early if built in, and does not
- * matter when built as a loadable module. Like bus
- * snooping debug drivers.
- */
-#define early_initcall(fn)             module_init(fn)
-#define core_initcall(fn)              module_init(fn)
-#define core_initcall_sync(fn)         module_init(fn)
-#define postcore_initcall(fn)          module_init(fn)
-#define postcore_initcall_sync(fn)     module_init(fn)
-#define arch_initcall(fn)              module_init(fn)
-#define subsys_initcall(fn)            module_init(fn)
-#define subsys_initcall_sync(fn)       module_init(fn)
-#define fs_initcall(fn)                        module_init(fn)
-#define fs_initcall_sync(fn)           module_init(fn)
-#define rootfs_initcall(fn)            module_init(fn)
-#define device_initcall(fn)            module_init(fn)
-#define device_initcall_sync(fn)       module_init(fn)
-#define late_initcall(fn)              module_init(fn)
-#define late_initcall_sync(fn)         module_init(fn)
-
-#define console_initcall(fn)           module_init(fn)
-#define security_initcall(fn)          module_init(fn)
-
-/* Each module must use one module_init(). */
-#define module_init(initfn)                                    \
-       static inline initcall_t __inittest(void)               \
-       { return initfn; }                                      \
-       int init_module(void) __attribute__((alias(#initfn)));
-
-/* This is only required if you want to be unloadable. */
-#define module_exit(exitfn)                                    \
-       static inline exitcall_t __exittest(void)               \
-       { return exitfn; }                                      \
-       void cleanup_module(void) __attribute__((alias(#exitfn)));
-
 #define __setup_param(str, unique_id, fn)      /* nothing */
 #define __setup(str, func)                     /* nothing */
 #endif
@@ -351,24 +291,6 @@ void __init parse_early_options(char *cmdline);
 /* Data marked not to be saved by software suspend */
 #define __nosavedata __section(.data..nosave)
 
-/* This means "can be init if no module support, otherwise module load
-   may call it." */
-#ifdef CONFIG_MODULES
-#define __init_or_module
-#define __initdata_or_module
-#define __initconst_or_module
-#define __INIT_OR_MODULE       .text
-#define __INITDATA_OR_MODULE   .data
-#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits
-#else
-#define __init_or_module __init
-#define __initdata_or_module __initdata
-#define __initconst_or_module __initconst
-#define __INIT_OR_MODULE __INIT
-#define __INITDATA_OR_MODULE __INITDATA
-#define __INITRODATA_OR_MODULE __INITRODATA
-#endif /*CONFIG_MODULES*/
-
 #ifdef MODULE
 #define __exit_p(x) x
 #else
index dc767f7c3704639da944153e8bdae7cf40cd804d..f9c1b6d0f2e4bcda9db87aa9d8d5907acd0da2d4 100644 (file)
@@ -258,7 +258,7 @@ extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
                                 void *data);
 struct device *iommu_device_create(struct device *parent, void *drvdata,
                                   const struct attribute_group **groups,
-                                  const char *fmt, ...);
+                                  const char *fmt, ...) __printf(4, 5);
 void iommu_device_destroy(struct device *dev);
 int iommu_device_link(struct device *dev, struct device *link);
 void iommu_device_unlink(struct device *dev, struct device *link);
index 82806c60aa4273d67ff5592dfc11e4eaa57c51d7..cb9dcad72372150f2ad0d76c229d5f63b07f74ff 100644 (file)
@@ -29,6 +29,7 @@ struct ipv6_devconf {
        __s32           max_desync_factor;
        __s32           max_addresses;
        __s32           accept_ra_defrtr;
+       __s32           accept_ra_min_hop_limit;
        __s32           accept_ra_pinfo;
 #ifdef CONFIG_IPV6_ROUTER_PREF
        __s32           accept_ra_rtr_pref;
@@ -57,6 +58,7 @@ struct ipv6_devconf {
                bool initialized;
                struct in6_addr secret;
        } stable_secret;
+       __s32           use_oif_addrs_only;
        void            *sysctl;
 };
 
@@ -94,7 +96,6 @@ static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb)
 struct inet6_skb_parm {
        int                     iif;
        __be16                  ra;
-       __u16                   hop;
        __u16                   dst0;
        __u16                   srcrt;
        __u16                   dst1;
@@ -111,6 +112,7 @@ struct inet6_skb_parm {
 #define IP6SKB_REROUTED                4
 #define IP6SKB_ROUTERALERT     8
 #define IP6SKB_FRAGMENTED      16
+#define IP6SKB_HOPBYHOP        32
 };
 
 #define IP6CB(skb)     ((struct inet6_skb_parm*)((skb)->cb))
index 5f0be58640ea6e73f88dd02cb3ac3bc7cdb6a0e6..5582410727cbf5cdb70bb1cba30729d7ebf4aa26 100644 (file)
@@ -411,7 +411,8 @@ extern __printf(3, 0)
 int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
 extern __printf(2, 3)
 char *kasprintf(gfp_t gfp, const char *fmt, ...);
-extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
+extern __printf(2, 0)
+char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
 
 extern __scanf(2, 3)
 int sscanf(const char *, const char *, ...);
@@ -679,10 +680,10 @@ do {                                                                      \
                __ftrace_vprintk(_THIS_IP_, fmt, vargs);                \
 } while (0)
 
-extern int
+extern __printf(2, 0) int
 __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
 
-extern int
+extern __printf(2, 0) int
 __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
 
 extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
@@ -702,7 +703,7 @@ int trace_printk(const char *fmt, ...)
 {
        return 0;
 }
-static inline int
+static __printf(1, 0) inline int
 ftrace_vprintk(const char *fmt, va_list ap)
 {
        return 0;
index 2d61b909f414f6f61e37ca9c53372d85392242fd..637f67002c5ac6b39536b5cf74a77caf3b1f749f 100644 (file)
@@ -80,8 +80,9 @@ struct kobject {
 
 extern __printf(2, 3)
 int kobject_set_name(struct kobject *kobj, const char *name, ...);
-extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
-                                 va_list vargs);
+extern __printf(2, 0)
+int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
+                          va_list vargs);
 
 static inline const char *kobject_name(const struct kobject *kobj)
 {
index 9564fd78c547b6128ddf8304639e2215b6190e3f..05e99b8ef465bcc10ca979b26b68277191951d86 100644 (file)
@@ -734,6 +734,24 @@ static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
        return false;
 }
 #endif
+#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
+void kvm_arch_start_assignment(struct kvm *kvm);
+void kvm_arch_end_assignment(struct kvm *kvm);
+bool kvm_arch_has_assigned_device(struct kvm *kvm);
+#else
+static inline void kvm_arch_start_assignment(struct kvm *kvm)
+{
+}
+
+static inline void kvm_arch_end_assignment(struct kvm *kvm)
+{
+}
+
+static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
+{
+       return false;
+}
+#endif
 
 static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
 {
index 36ce37bcc963c270548989d658caa4f3ce832045..c9cfbcdb8d140e2f136b724501a064a25a766fb3 100644 (file)
@@ -431,6 +431,8 @@ enum {
        ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21),  /* some WDs have broken LPM */
        ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
        ATA_HORKAGE_NO_NCQ_LOG  = (1 << 23),    /* don't use NCQ for log read */
+       ATA_HORKAGE_NOTRIM      = (1 << 24),    /* don't use TRIM */
+       ATA_HORKAGE_MAX_SEC_1024 = (1 << 25),   /* Limit max sects to 1024 */
 
         /* DMA mask for user DMA control: User visible values; DO NOT
            renumber */
index e7ecc12a11636f1afe2f591e019da293ed1f517a..09cebe528488e18c8561b27f2f18eade1f51c356 100644 (file)
@@ -88,7 +88,8 @@ struct mlx4_ts_cqe {
 
 enum {
        MLX4_CQE_L2_TUNNEL_IPOK         = 1 << 31,
-       MLX4_CQE_VLAN_PRESENT_MASK      = 1 << 29,
+       MLX4_CQE_CVLAN_PRESENT_MASK     = 1 << 29,
+       MLX4_CQE_SVLAN_PRESENT_MASK     = 1 << 30,
        MLX4_CQE_L2_TUNNEL              = 1 << 27,
        MLX4_CQE_L2_TUNNEL_CSUM         = 1 << 26,
        MLX4_CQE_L2_TUNNEL_IPV4         = 1 << 25,
index fd13c1ce3b4abf797a4a720c9dd567587a477274..bcbf8c72a77bee6ef2acc96489a4559bf02467ee 100644 (file)
@@ -211,6 +211,8 @@ enum {
        MLX4_DEV_CAP_FLAG2_ETS_CFG              = 1LL <<  26,
        MLX4_DEV_CAP_FLAG2_PORT_BEACON          = 1LL <<  27,
        MLX4_DEV_CAP_FLAG2_IGNORE_FCS           = 1LL <<  28,
+       MLX4_DEV_CAP_FLAG2_PHV_EN               = 1LL <<  29,
+       MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN      = 1LL <<  30,
 };
 
 enum {
@@ -581,6 +583,7 @@ struct mlx4_caps {
        u64                     phys_port_id[MLX4_MAX_PORTS + 1];
        int                     tunnel_offload_mode;
        u8                      rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
+       u8                      phv_bit[MLX4_MAX_PORTS + 1];
        u8                      alloc_res_qp_mask;
        u32                     dmfs_high_rate_qpn_base;
        u32                     dmfs_high_rate_qpn_range;
@@ -1332,6 +1335,8 @@ int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
 int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
                            u8 ignore_fcs_value);
 int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
+int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
+int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
 int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
index 6fed539e54569c3f0701632a8a17e9bdf16a53d6..de45a51b3f041d28e644cc6a0ce011d6cd2d4fb6 100644 (file)
@@ -272,7 +272,8 @@ enum {
        MLX4_WQE_CTRL_SOLICITED         = 1 << 1,
        MLX4_WQE_CTRL_IP_CSUM           = 1 << 4,
        MLX4_WQE_CTRL_TCP_UDP_CSUM      = 1 << 5,
-       MLX4_WQE_CTRL_INS_VLAN          = 1 << 6,
+       MLX4_WQE_CTRL_INS_CVLAN         = 1 << 6,
+       MLX4_WQE_CTRL_INS_SVLAN         = 1 << 7,
        MLX4_WQE_CTRL_STRONG_ORDER      = 1 << 7,
        MLX4_WQE_CTRL_FORCE_LOOPBACK    = 1 << 0,
 };
index 5722d88c24290358a928914c48fafec9a886b1c7..5fe0cae1a515567fb59b42e9b7ba49e9033826f5 100644 (file)
@@ -380,7 +380,7 @@ struct mlx5_uar {
        u32                     index;
        struct list_head        bf_list;
        unsigned                free_bf_bmap;
-       void __iomem           *wc_map;
+       void __iomem           *bf_map;
        void __iomem           *map;
 };
 
@@ -435,6 +435,8 @@ struct mlx5_priv {
        struct mlx5_uuar_info   uuari;
        MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
 
+       struct io_mapping       *bf_mapping;
+
        /* pages stuff */
        struct workqueue_struct *pg_wq;
        struct rb_root          page_root;
@@ -463,6 +465,10 @@ struct mlx5_priv {
        /* end: mr staff */
 
        /* start: alloc staff */
+       /* protect buffer allocation according to numa node */
+       struct mutex            alloc_mutex;
+       int                     numa_node;
+
        struct mutex            pgdir_mutex;
        struct list_head        pgdir_list;
        /* end: alloc staff */
@@ -672,6 +678,8 @@ void mlx5_health_cleanup(void);
 void  __init mlx5_health_init(void);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+                       struct mlx5_buf *buf, int node);
 int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
 struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
@@ -773,6 +781,8 @@ void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
+                      int node);
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
 
 const char *mlx5_command_str(int command);
index 6d2f6fee041cd4f663fd7d6898b8ed8418a95b73..c60a62bba652c112517abefdaf60433437780601 100644 (file)
@@ -1936,9 +1936,9 @@ enum {
 };
 
 enum {
-       MLX5_TIRC_RX_HASH_FN_HASH_NONE           = 0x0,
-       MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8  = 0x1,
-       MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ       = 0x2,
+       MLX5_RX_HASH_FN_NONE           = 0x0,
+       MLX5_RX_HASH_FN_INVERTED_XOR8  = 0x1,
+       MLX5_RX_HASH_FN_TOEPLITZ       = 0x2,
 };
 
 enum {
index c5d52780d6a02fe27ec67cb892b447c43a6beefb..3ba327af055cc43e4d38517fda8557420238f7f0 100644 (file)
@@ -106,6 +106,6 @@ extern void enable_mmiotrace(void);
 extern void disable_mmiotrace(void);
 extern void mmio_trace_rw(struct mmiotrace_rw *rw);
 extern void mmio_trace_mapping(struct mmiotrace_map *map);
-extern int mmio_trace_printk(const char *fmt, va_list args);
+extern __printf(1, 0) int mmio_trace_printk(const char *fmt, va_list args);
 
 #endif /* _LINUX_MMIOTRACE_H */
index d67b1932cc59869cd5c3dc5d24efa5994b368386..3a19c79918e02d37c3e77ed32a88eb9f36bb16c3 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/compiler.h>
 #include <linux/cache.h>
 #include <linux/kmod.h>
+#include <linux/init.h>
 #include <linux/elf.h>
 #include <linux/stringify.h>
 #include <linux/kobject.h>
@@ -71,6 +72,89 @@ extern struct module_attribute module_uevent;
 extern int init_module(void);
 extern void cleanup_module(void);
 
+#ifndef MODULE
+/**
+ * module_init() - driver initialization entry point
+ * @x: function to be run at kernel boot time or module insertion
+ *
+ * module_init() will either be called during do_initcalls() (if
+ * builtin) or at module insertion time (if a module).  There can only
+ * be one per module.
+ */
+#define module_init(x) __initcall(x);
+
+/**
+ * module_exit() - driver exit entry point
+ * @x: function to be run when driver is removed
+ *
+ * module_exit() will wrap the driver clean-up code
+ * with cleanup_module() when used with rmmod when
+ * the driver is a module.  If the driver is statically
+ * compiled into the kernel, module_exit() has no effect.
+ * There can only be one per module.
+ */
+#define module_exit(x) __exitcall(x);
+
+#else /* MODULE */
+
+/*
+ * In most cases loadable modules do not need custom
+ * initcall levels. There are still some valid cases where
+ * a driver may be needed early if built in, and does not
+ * matter when built as a loadable module, such as bus-
+ * snooping debug drivers.
+ */
+#define early_initcall(fn)             module_init(fn)
+#define core_initcall(fn)              module_init(fn)
+#define core_initcall_sync(fn)         module_init(fn)
+#define postcore_initcall(fn)          module_init(fn)
+#define postcore_initcall_sync(fn)     module_init(fn)
+#define arch_initcall(fn)              module_init(fn)
+#define subsys_initcall(fn)            module_init(fn)
+#define subsys_initcall_sync(fn)       module_init(fn)
+#define fs_initcall(fn)                        module_init(fn)
+#define fs_initcall_sync(fn)           module_init(fn)
+#define rootfs_initcall(fn)            module_init(fn)
+#define device_initcall(fn)            module_init(fn)
+#define device_initcall_sync(fn)       module_init(fn)
+#define late_initcall(fn)              module_init(fn)
+#define late_initcall_sync(fn)         module_init(fn)
+
+#define console_initcall(fn)           module_init(fn)
+#define security_initcall(fn)          module_init(fn)
+
+/* Each module must use one module_init(). */
+#define module_init(initfn)                                    \
+       static inline initcall_t __inittest(void)               \
+       { return initfn; }                                      \
+       int init_module(void) __attribute__((alias(#initfn)));
+
+/* This is only required if you want to be unloadable. */
+#define module_exit(exitfn)                                    \
+       static inline exitcall_t __exittest(void)               \
+       { return exitfn; }                                      \
+       void cleanup_module(void) __attribute__((alias(#exitfn)));
+
+#endif
+
+/* This means "can be init if no module support, otherwise module load
+   may call it." */
+#ifdef CONFIG_MODULES
+#define __init_or_module
+#define __initdata_or_module
+#define __initconst_or_module
+#define __INIT_OR_MODULE       .text
+#define __INITDATA_OR_MODULE   .data
+#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits
+#else
+#define __init_or_module __init
+#define __initdata_or_module __initdata
+#define __initconst_or_module __initconst
+#define __INIT_OR_MODULE __INIT
+#define __INITDATA_OR_MODULE __INITDATA
+#define __INITRODATA_OR_MODULE __INITRODATA
+#endif /*CONFIG_MODULES*/
+
 /* Archs provide a method of finding the correct exception table. */
 struct exception_table_entry;
 
diff --git a/include/linux/mpls_iptunnel.h b/include/linux/mpls_iptunnel.h
new file mode 100644 (file)
index 0000000..ef29eb2
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef _LINUX_MPLS_IPTUNNEL_H
+#define _LINUX_MPLS_IPTUNNEL_H
+
+#include <uapi/linux/mpls_iptunnel.h>
+
+#endif  /* _LINUX_MPLS_IPTUNNEL_H */
index f25e2bdd188c94643efc0f3a94c7c67de81897aa..272f42952f3424aa43953b7e3a50b925a93bd465 100644 (file)
@@ -177,11 +177,6 @@ typedef enum {
 #define NAND_OWN_BUFFERS       0x00020000
 /* Chip may not exist, so silence any errors in scan */
 #define NAND_SCAN_SILENT_NODEV 0x00040000
-/*
- * This option could be defined by controller drivers to protect against
- * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
- */
-#define NAND_USE_BOUNCE_BUFFER 0x00080000
 /*
  * Autodetect nand buswidth with readid/onfi.
 * This supposes the driver will configure the hardware in 8-bit mode
@@ -189,6 +184,11 @@ typedef enum {
  * before calling nand_scan_tail.
  */
 #define NAND_BUSWIDTH_AUTO      0x00080000
+/*
+ * This option could be defined by controller drivers to protect against
+ * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
+ */
+#define NAND_USE_BOUNCE_BUFFER 0x00100000
 
 /* Options set by nand scan */
 /* Nand scan has allocated controller struct */
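The reshuffle above fixes a flag collision: NAND_USE_BOUNCE_BUFFER and NAND_BUSWIDTH_AUTO were both defined as 0x00080000, so a driver setting one silently implied the other. After the change each option owns a distinct bit:

#define NAND_SCAN_SILENT_NODEV  0x00040000
#define NAND_BUSWIDTH_AUTO      0x00080000
#define NAND_USE_BOUNCE_BUFFER  0x00100000      /* was 0x00080000 */

static void demo_rw(struct nand_chip *chip)
{
        /* Before the fix this test was also true for buswidth-auto
         * chips, because the two flags shared a bit
         * (use_bounce_buffer() is a placeholder): */
        if (chip->options & NAND_USE_BOUNCE_BUFFER)
                use_bounce_buffer(chip);
}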
index e20979dfd6a99688a696779b8952ab66bc143739..607b5f41f46f93e506adbefd7b3ed11ed8acfb67 100644 (file)
@@ -766,6 +766,13 @@ struct netdev_phys_item_id {
        unsigned char id_len;
 };
 
+static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
+                                           struct netdev_phys_item_id *b)
+{
+       return a->id_len == b->id_len &&
+              memcmp(a->id, b->id, a->id_len) == 0;
+}
+
 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
                                       struct sk_buff *skb);
 
@@ -1041,6 +1048,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *     TX queue.
  * int (*ndo_get_iflink)(const struct net_device *dev);
  *     Called to get the iflink value of this device.
+ * int (*ndo_change_proto_down)(struct net_device *dev,
+ *                              bool proto_down);
+ *     This function is used to pass protocol port error state information
+ *     to the switch driver. The switch driver can react to the proto_down
+ *     by doing a phys down on the associated switch port.
+ *
  */
 struct net_device_ops {
        int                     (*ndo_init)(struct net_device *dev);
@@ -1211,6 +1224,8 @@ struct net_device_ops {
                                                      int queue_index,
                                                      u32 maxrate);
        int                     (*ndo_get_iflink)(const struct net_device *dev);
+       int                     (*ndo_change_proto_down)(struct net_device *dev,
+                                                        bool proto_down);
 };
 
 /**
@@ -1448,6 +1463,8 @@ enum netdev_priv_flags {
  *
  *     @xps_maps:      XXX: need comments on this one
  *
+ *     @offload_fwd_mark:      Offload device fwding mark
+ *
  *     @trans_start:           Time (in jiffies) of last Tx
  *     @watchdog_timeo:        Represents the timeout that is used by
  *                             the watchdog ( see dev_watchdog() )
@@ -1502,6 +1519,10 @@ enum netdev_priv_flags {
  *
  *     @qdisc_tx_busylock:     XXX: need comments on this one
  *
+ *     @proto_down:    protocol port state information can be sent to the
+ *                     switch driver and used to set the phys state of the
+ *                     switch port.
+ *
  *     FIXME: cleanup struct net_device such that network protocol info
  *     moves out.
  */
@@ -1685,6 +1706,10 @@ struct net_device {
        struct xps_dev_maps __rcu *xps_maps;
 #endif
 
+#ifdef CONFIG_NET_SWITCHDEV
+       u32                     offload_fwd_mark;
+#endif
+
        /* These may be needed for future network-power-down code. */
 
        /*
@@ -1762,6 +1787,7 @@ struct net_device {
 #endif
        struct phy_device *phydev;
        struct lock_class_key *qdisc_tx_busylock;
+       bool proto_down;
 };
 #define to_net_dev(d) container_of(d, struct net_device, dev)
 
@@ -2982,6 +3008,7 @@ int dev_get_phys_port_id(struct net_device *dev,
                         struct netdev_phys_item_id *ppid);
 int dev_get_phys_port_name(struct net_device *dev,
                           char *name, size_t len);
+int dev_change_proto_down(struct net_device *dev, bool proto_down);
 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                                    struct netdev_queue *txq, int *ret);
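A hedged sketch of a switch driver wiring up the new op (all demo_* names are hypothetical; dev_change_proto_down() above is the generic entry point that invokes it):

static int demo_change_proto_down(struct net_device *dev, bool proto_down)
{
        struct demo_port *port = netdev_priv(dev);

        /* Protocol detected an error on this port: take the physical
         * switch port down (or bring it back up). */
        return demo_hw_set_phys_link(port, !proto_down);
}

static const struct net_device_ops demo_netdev_ops = {
        .ndo_change_proto_down  = demo_change_proto_down,
        /* ... other ops ... */
};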
index 00050dfd9f2309e9cd0f9a62f133e0d589fc47de..d788ce62d8264a4db1aec11a00801c1875f15774 100644 (file)
@@ -11,6 +11,8 @@
 #include <linux/list.h>
 #include <linux/static_key.h>
 #include <linux/netfilter_defs.h>
+#include <linux/netdevice.h>
+#include <net/net_namespace.h>
 
 #ifdef CONFIG_NETFILTER
 static inline int NF_DROP_GETERR(int verdict)
@@ -118,6 +120,13 @@ struct nf_sockopt_ops {
 };
 
 /* Function to register/unregister hook points. */
+int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
+void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
+int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+                         unsigned int n);
+void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+                            unsigned int n);
+
 int nf_register_hook(struct nf_hook_ops *reg);
 void nf_unregister_hook(struct nf_hook_ops *reg);
 int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
@@ -128,33 +137,26 @@ void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
 int nf_register_sockopt(struct nf_sockopt_ops *reg);
 void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
 
-extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
-
 #ifdef HAVE_JUMP_LABEL
 extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 
-static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+static inline bool nf_hook_list_active(struct list_head *hook_list,
                                       u_int8_t pf, unsigned int hook)
 {
        if (__builtin_constant_p(pf) &&
            __builtin_constant_p(hook))
                return static_key_false(&nf_hooks_needed[pf][hook]);
 
-       return !list_empty(nf_hook_list);
+       return !list_empty(hook_list);
 }
 #else
-static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+static inline bool nf_hook_list_active(struct list_head *hook_list,
                                       u_int8_t pf, unsigned int hook)
 {
-       return !list_empty(nf_hook_list);
+       return !list_empty(hook_list);
 }
 #endif
 
-static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
-{
-       return nf_hook_list_active(&nf_hooks[pf][hook], pf, hook);
-}
-
 int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
 
 /**
@@ -172,10 +174,13 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
                                 int (*okfn)(struct sock *, struct sk_buff *),
                                 int thresh)
 {
-       if (nf_hooks_active(pf, hook)) {
+       struct net *net = dev_net(indev ? indev : outdev);
+       struct list_head *hook_list = &net->nf.hooks[pf][hook];
+
+       if (nf_hook_list_active(hook_list, pf, hook)) {
                struct nf_hook_state state;
 
-               nf_hook_state_init(&state, &nf_hooks[pf][hook], hook, thresh,
+               nf_hook_state_init(&state, hook_list, hook, thresh,
                                   pf, indev, outdev, sk, okfn);
                return nf_hook_slow(skb, &state);
        }
@@ -385,4 +390,15 @@ extern struct nfq_ct_hook __rcu *nfq_ct_hook;
 static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
 #endif
 
+/**
+ * nf_skb_duplicated - TEE target has sent a packet
+ *
+ * When an xtables target sends a packet, the OUTPUT and POSTROUTING
+ * hooks are traversed again, i.e. nft and xtables are invoked recursively.
+ *
+ * This is used by the xtables TEE target to prevent the duplicated skb from
+ * being duplicated again.
+ */
+DECLARE_PER_CPU(bool, nf_skb_duplicated);
+
 #endif /*__LINUX_NETFILTER_H*/
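
The per-net registration functions above replace uses of the old global
nf_hooks table. A minimal sketch of a pernet subsystem registering an
observe-only IPv4 hook (all demo_* names are hypothetical; the registration
API is as declared above):

#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/net_namespace.h>

static unsigned int demo_hook_fn(const struct nf_hook_ops *ops,
				 struct sk_buff *skb,
				 const struct nf_hook_state *state)
{
	return NF_ACCEPT;	/* observe only, never drop */
}

static struct nf_hook_ops demo_hooks[] = {
	{
		.hook		= demo_hook_fn,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_LOCAL_OUT,
		.priority	= NF_IP_PRI_FIRST,
	},
};

static int __net_init demo_net_init(struct net *net)
{
	/* Hooks now live in net->nf.hooks, so every namespace gets its
	 * own registration instead of sharing one global list.
	 */
	return nf_register_net_hooks(net, demo_hooks, ARRAY_SIZE(demo_hooks));
}

static void __net_exit demo_net_exit(struct net *net)
{
	nf_unregister_net_hooks(net, demo_hooks, ARRAY_SIZE(demo_hooks));
}

static struct pernet_operations demo_net_ops = {
	.init	= demo_net_init,
	.exit	= demo_net_exit,
};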
index 286098a5667f5f40852aacefce423ac5e3721ae8..b006b719183fc40aef93d6eae1e90f302979a4dc 100644 (file)
@@ -3,6 +3,7 @@
 
 
 #include <linux/netdevice.h>
+#include <linux/static_key.h>
 #include <uapi/linux/netfilter/x_tables.h>
 
 /**
@@ -222,7 +223,6 @@ struct xt_table_info {
         * @stacksize jumps (number of user chains) can possibly be made.
         */
        unsigned int stacksize;
-       unsigned int __percpu *stackptr;
        void ***jumpstack;
 
        unsigned char entries[0] __aligned(8);
@@ -281,6 +281,12 @@ void xt_free_table_info(struct xt_table_info *info);
  */
 DECLARE_PER_CPU(seqcount_t, xt_recseq);
 
+/* xt_tee_enabled - true if x_tables needs to handle reentrancy
+ *
+ * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
+ */
+extern struct static_key xt_tee_enabled;
+
 /**
  * xt_write_recseq_begin - start of a write section
  *
index 6d80fc68632328c743f66d6370388b5406eb7fcb..2437b8a5d7a945d2a3ab5c4ff67dfd6f260bc95a 100644 (file)
@@ -17,9 +17,6 @@ enum nf_br_hook_priorities {
 
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 
-#define BRNF_BRIDGED_DNAT              0x02
-#define BRNF_NF_BRIDGE_PREROUTING      0x08
-
 int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
 
 static inline void br_drop_fake_rtable(struct sk_buff *skb)
@@ -63,8 +60,17 @@ nf_bridge_get_physoutdev(const struct sk_buff *skb)
 {
        return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL;
 }
+
+static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
+{
+       return skb->nf_bridge && skb->nf_bridge->in_prerouting;
+}
 #else
 #define br_drop_fake_rtable(skb)               do { } while (0)
+static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
+{
+       return false;
+}
 #endif /* CONFIG_BRIDGE_NETFILTER */
 
 #endif
index f91b5ade30c98fe8b03d06bc40deb0c434edb633..874b77228fb96285fb2024f07fa99d1ed7dd6dda 100644 (file)
@@ -292,9 +292,12 @@ static inline void nfs_mark_for_revalidate(struct inode *inode)
        struct nfs_inode *nfsi = NFS_I(inode);
 
        spin_lock(&inode->i_lock);
-       nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS;
+       nfsi->cache_validity |= NFS_INO_INVALID_ATTR |
+                               NFS_INO_REVAL_PAGECACHE |
+                               NFS_INO_INVALID_ACCESS |
+                               NFS_INO_INVALID_ACL;
        if (S_ISDIR(inode->i_mode))
-               nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
+               nfsi->cache_validity |= NFS_INO_INVALID_DATA;
        spin_unlock(&inode->i_lock);
 }
 
index a2ea1491d3dfc487611445490fb10adf9972d777..20bc8e51b16124496326274e84fc12d61996a4fd 100644 (file)
@@ -220,7 +220,7 @@ struct nfs_server {
 #define NFS_CAP_SYMLINKS       (1U << 2)
 #define NFS_CAP_ACLS           (1U << 3)
 #define NFS_CAP_ATOMIC_OPEN    (1U << 4)
-#define NFS_CAP_CHANGE_ATTR    (1U << 5)
+/* #define NFS_CAP_CHANGE_ATTR (1U << 5) */
 #define NFS_CAP_FILEID         (1U << 6)
 #define NFS_CAP_MODE           (1U << 7)
 #define NFS_CAP_NLINK          (1U << 8)
index 4c508549833a53fc76b59ba6a0bda354fc4d051f..cc7dd687a89dd60699ebe28556138e293bcde9bc 100644 (file)
@@ -59,7 +59,7 @@ void of_dma_configure(struct device *dev, struct device_node *np);
 #else /* CONFIG_OF */
 
 static inline int of_driver_match_device(struct device *dev,
-                                        struct device_driver *drv)
+                                        const struct device_driver *drv)
 {
        return 0;
 }
index b48c3471c254471ed940921ab7bb4832d6f92023..cacaabea8a0950115138c956737c2bd9d7c04a90 100644 (file)
@@ -8,6 +8,7 @@ extern struct page_ext_operations page_owner_ops;
 extern void __reset_page_owner(struct page *page, unsigned int order);
 extern void __set_page_owner(struct page *page,
                        unsigned int order, gfp_t gfp_mask);
+extern gfp_t __get_page_owner_gfp(struct page *page);
 
 static inline void reset_page_owner(struct page *page, unsigned int order)
 {
@@ -25,6 +26,14 @@ static inline void set_page_owner(struct page *page,
 
        __set_page_owner(page, order, gfp_mask);
 }
+
+static inline gfp_t get_page_owner_gfp(struct page *page)
+{
+       if (likely(!page_owner_inited))
+               return 0;
+
+       return __get_page_owner_gfp(page);
+}
 #else
 static inline void reset_page_owner(struct page *page, unsigned int order)
 {
@@ -33,6 +42,10 @@ static inline void set_page_owner(struct page *page,
                        unsigned int order, gfp_t gfp_mask)
 {
 }
+static inline gfp_t get_page_owner_gfp(struct page *page)
+{
+       return 0;
+}
 
 #endif /* CONFIG_PAGE_OWNER */
 #endif /* __LINUX_PAGE_OWNER_H */
index 3cc21c9cc1e86ca6a48b23bbd466ff5aa7134e74..9fade5dd2e869219b8ff193e4f9a0bbdd1aac83f 100644 (file)
@@ -4,7 +4,7 @@
  * Arasan Compact Flash host controller platform data header file
  *
  * Copyright (C) 2011 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
+ * Viresh Kumar <vireshk@kernel.org>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index a26c3f84b8ddc6c15e2abbecf47a588419534b11..e5fb1d4159619f7ecad0fca5515e627fd7016e06 100644 (file)
@@ -424,6 +424,8 @@ struct phy_device {
 
        struct net_device *attached_dev;
 
+       u8 mdix;
+
        void (*adjust_link)(struct net_device *dev);
 };
 #define to_phy_device(d) container_of(d, struct phy_device, dev)
index 044a124bfbbc6b313d96f187a598fcb1438b8cc6..21b15f6fee2546e520a355b5a1d8cd4835fdc518 100644 (file)
@@ -8,11 +8,19 @@
 #ifndef __MACB_PDATA_H__
 #define __MACB_PDATA_H__
 
+/**
+ * struct macb_platform_data - platform data for MACB Ethernet
+ * @phy_mask:          phy mask passed when registering the MDIO bus
+ *                     within the driver
+ * @phy_irq_pin:       PHY IRQ
+ * @is_rmii:           using RMII interface?
+ * @rev_eth_addr:      reverse Ethernet address byte order
+ */
 struct macb_platform_data {
        u32             phy_mask;
-       int             phy_irq_pin;    /* PHY IRQ */
-       u8              is_rmii;        /* using RMII interface? */
-       u8              rev_eth_addr;   /* reverse Ethernet address byte order */
+       int             phy_irq_pin;
+       u8              is_rmii;
+       u8              rev_eth_addr;
 };
 
 #endif /* __MACB_PDATA_H__ */
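
With the field comments promoted to kernel-doc, a board file fills the
structure the same way as before; a hedged example with illustrative values
(passed to the device via platform_device_add_data() or similar):

#include <linux/platform_data/macb.h>

static struct macb_platform_data demo_macb_pdata = {
	.phy_mask	= ~(1U << 1),	/* probe only PHY address 1 */
	.phy_irq_pin	= -1,		/* no dedicated PHY IRQ GPIO */
	.is_rmii	= 1,		/* reduced-pin RMII wiring */
	.rev_eth_addr	= 0,		/* MAC address bytes in normal order */
};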
index 75f70f6ac13778f448a03de3136d32a0ef638b65..e1571efa3f2b28e01642e08f6709d7c7c8dc141a 100644 (file)
@@ -43,7 +43,6 @@ struct esdhc_platform_data {
        enum wp_types wp_type;
        enum cd_types cd_type;
        int max_bus_width;
-       unsigned int f_max;
        bool support_vsel;
        unsigned int delay_line;
 };
index 58b1fec40d37373001a6d46840702d33e70f1001..a6298b27ac99d9197ccd4dac6c04b09f2c5011da 100644 (file)
@@ -122,7 +122,7 @@ static inline __printf(1, 2) __cold
 void early_printk(const char *s, ...) { }
 #endif
 
-typedef int(*printk_func_t)(const char *fmt, va_list args);
+typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args);
 
 #ifdef CONFIG_PRINTK
 asmlinkage __printf(5, 0)
@@ -166,7 +166,7 @@ char *log_buf_addr_get(void);
 u32 log_buf_len_get(void);
 void log_buf_kexec_setup(void);
 void __init setup_log_buf(int early);
-void dump_stack_set_arch_desc(const char *fmt, ...);
+__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
 void dump_stack_print_info(const char *log_lvl);
 void show_regs_print_info(const char *log_lvl);
 #else
@@ -217,7 +217,7 @@ static inline void setup_log_buf(int early)
 {
 }
 
-static inline void dump_stack_set_arch_desc(const char *fmt, ...)
+static inline __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...)
 {
 }
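
The __printf(m, n) annotations above let gcc type-check printf-style
arguments: m is the index of the format string, n the index of the first
vararg (0 for va_list wrappers such as printk_func_t). A hedged sketch on a
hypothetical wrapper:

#include <linux/kernel.h>

/* Format/argument mismatches in callers of demo_log() now trigger
 * -Wformat warnings at compile time, exactly as for printk().
 */
static __printf(1, 2) void demo_log(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);
}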
 
index ae21f1591615e06cec2115563c3f821fe36c868e..04b5ada460b44e4cf8cfdd918ec8572ea8683a8d 100644 (file)
@@ -1522,8 +1522,6 @@ struct task_struct {
 /* hung task detection */
        unsigned long last_switch_count;
 #endif
-/* CPU-specific state of this task */
-       struct thread_struct thread;
 /* filesystem information */
        struct fs_struct *fs;
 /* open file information */
@@ -1778,8 +1776,22 @@ struct task_struct {
        unsigned long   task_state_change;
 #endif
        int pagefault_disabled;
+/* CPU-specific state of this task */
+       struct thread_struct thread;
+/*
+ * WARNING: on x86, 'thread_struct' contains a variable-sized
+ * structure.  It *MUST* be at the end of 'task_struct'.
+ *
+ * Do not put anything below here!
+ */
 };
 
+#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
+extern int arch_task_struct_size __read_mostly;
+#else
+# define arch_task_struct_size (sizeof(struct task_struct))
+#endif
+
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
 
index d6cdd6e87d53bcd1b4f390f61f73b1c91b076bdd..df9fdf5576896e841bec213ebc6d25aee2f6f0fb 100644 (file)
@@ -37,6 +37,7 @@
 #include <net/flow_dissector.h>
 #include <linux/splice.h>
 #include <linux/in6.h>
+#include <net/flow.h>
 
 /* A. Checksumming of received packets by device.
  *
@@ -173,17 +174,24 @@ struct nf_bridge_info {
                BRNF_PROTO_8021Q,
                BRNF_PROTO_PPPOE
        } orig_proto:8;
-       bool                    pkt_otherhost;
+       u8                      pkt_otherhost:1;
+       u8                      in_prerouting:1;
+       u8                      bridged_dnat:1;
        __u16                   frag_max_size;
-       unsigned int            mask;
        struct net_device       *physindev;
        union {
-               struct net_device *physoutdev;
-               char neigh_header[8];
-       };
-       union {
+               /* prerouting: detect dnat in orig/reply direction */
                __be32          ipv4_daddr;
                struct in6_addr ipv6_daddr;
+
+               /* after prerouting + nat detected: store the original source
+                * mac, since neigh resolution overwrites it; only used while
+                * the skb is out in the neigh layer.
+                */
+               char neigh_header[8];
+
+               /* always valid & non-NULL from FORWARD on, for physdev match */
+               struct net_device *physoutdev;
        };
 };
 #endif
@@ -506,6 +514,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
  *     @no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
 *     @napi_id: id of the NAPI struct this skb came from
  *     @secmark: security marking
+ *     @offload_fwd_mark: fwding offload mark
  *     @mark: Generic packet mark
  *     @vlan_proto: vlan encapsulation protocol
  *     @vlan_tci: vlan tag control information
@@ -650,9 +659,15 @@ struct sk_buff {
                unsigned int    sender_cpu;
        };
 #endif
+       union {
 #ifdef CONFIG_NETWORK_SECMARK
-       __u32                   secmark;
+               __u32           secmark;
+#endif
+#ifdef CONFIG_NET_SWITCHDEV
+               __u32           offload_fwd_mark;
 #endif
+       };
+
        union {
                __u32           mark;
                __u32           reserved_tailroom;
@@ -938,6 +953,26 @@ static inline __u32 skb_get_hash(struct sk_buff *skb)
        return skb->hash;
 }
 
+__u32 __skb_get_hash_flowi6(struct sk_buff *skb, struct flowi6 *fl6);
+
+static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, struct flowi6 *fl6)
+{
+       if (!skb->l4_hash && !skb->sw_hash)
+               __skb_get_hash_flowi6(skb, fl6);
+
+       return skb->hash;
+}
+
+__u32 __skb_get_hash_flowi4(struct sk_buff *skb, struct flowi4 *fl);
+
+static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, struct flowi4 *fl4)
+{
+       if (!skb->l4_hash && !skb->sw_hash)
+               __skb_get_hash_flowi4(skb, fl4);
+
+       return skb->hash;
+}
+
 __u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
 
 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
@@ -2671,12 +2706,6 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
        skb_shinfo(skb)->frag_list = NULL;
 }
 
-static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
-{
-       frag->next = skb_shinfo(skb)->frag_list;
-       skb_shinfo(skb)->frag_list = frag;
-}
-
 #define skb_walk_frags(skb, iter)      \
        for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
 
@@ -3468,5 +3497,6 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
                               skb_network_header(skb);
        return hdr_len + skb_gso_transport_seglen(skb);
 }
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SKBUFF_H */
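
The new skb_get_hash_flowi6()/skb_get_hash_flowi4() helpers hash from a flow
descriptor instead of dissecting the packet, caching the result in skb->hash.
A hedged sketch of the intended transmit-side call pattern (the wrapper and
its parameters are illustrative):

#include <linux/skbuff.h>
#include <net/flow.h>

static u32 demo_hash_for_output(struct sk_buff *skb,
				const struct in6_addr *saddr,
				const struct in6_addr *daddr,
				__be16 sport, __be16 dport)
{
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.saddr = *saddr;
	fl6.daddr = *daddr;
	fl6.fl6_sport = sport;
	fl6.fl6_dport = dport;

	/* Computes and caches skb->hash on first use only. */
	return skb_get_hash_flowi6(skb, &fl6);
}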
index c735f5c91eead34520726a503538ec48046dde86..eead8ab93c0a36e402741ee767d3c3bc70128964 100644 (file)
@@ -119,30 +119,8 @@ struct plat_stmmacenet_data {
        int rx_fifo_size;
        void (*fix_mac_speed)(void *priv, unsigned int speed);
        void (*bus_setup)(void __iomem *ioaddr);
-       void *(*setup)(struct platform_device *pdev);
-       void (*free)(struct platform_device *pdev, void *priv);
        int (*init)(struct platform_device *pdev, void *priv);
        void (*exit)(struct platform_device *pdev, void *priv);
-       void *custom_cfg;
-       void *custom_data;
        void *bsp_priv;
 };
-
-/* of_data for SoC glue layer device tree bindings */
-
-struct stmmac_of_data {
-       int has_gmac;
-       int enh_desc;
-       int tx_coe;
-       int rx_coe;
-       int bugged_jumbo;
-       int pmt;
-       int riwt_off;
-       void (*fix_mac_speed)(void *priv, unsigned int speed);
-       void (*bus_setup)(void __iomem *ioaddr);
-       void *(*setup)(struct platform_device *pdev);
-       void (*free)(struct platform_device *pdev, void *priv);
-       int (*init)(struct platform_device *pdev, void *priv);
-       void (*exit)(struct platform_device *pdev, void *priv);
-};
 #endif
index 3ee4c92afd1bd2baf2b90201a9b4af896d020b5f..4519c81304bd26ec6a6fe6520ecd8a1caeaba4dd 100644 (file)
@@ -21,6 +21,8 @@ struct tcf_common {
        struct gnet_stats_rate_est64    tcfc_rate_est;
        spinlock_t                      tcfc_lock;
        struct rcu_head                 tcfc_rcu;
+       struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+       struct gnet_stats_queue __percpu *cpu_qstats;
 };
 #define tcf_head       common.tcfc_head
 #define tcf_index      common.tcfc_index
@@ -68,6 +70,17 @@ static inline void tcf_hashinfo_destroy(struct tcf_hashinfo *hf)
        kfree(hf->htab);
 }
 
+/* Update lastuse only if needed, to avoid dirtying a cache line.
+ * We use a temp variable to avoid fetching jiffies twice.
+ */
+static inline void tcf_lastuse_update(struct tcf_t *tm)
+{
+       unsigned long now = jiffies;
+
+       if (tm->lastuse != now)
+               tm->lastuse = now;
+}
+
 #ifdef CONFIG_NET_CLS_ACT
 
 #define ACT_P_CREATED 1
@@ -99,14 +112,20 @@ struct tc_action_ops {
 
 int tcf_hash_search(struct tc_action *a, u32 index);
 void tcf_hash_destroy(struct tc_action *a);
-int tcf_hash_release(struct tc_action *a, int bind);
 u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
 int tcf_hash_check(u32 index, struct tc_action *a, int bind);
 int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
-                   int size, int bind);
+                   int size, int bind, bool cpustats);
 void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
 void tcf_hash_insert(struct tc_action *a);
 
+int __tcf_hash_release(struct tc_action *a, bool bind, bool strict);
+
+static inline int tcf_hash_release(struct tc_action *a, bool bind)
+{
+       return __tcf_hash_release(a, bind, false);
+}
+
 int tcf_register_action(struct tc_action_ops *a, unsigned int mask);
 int tcf_unregister_action(struct tc_action_ops *a);
 int tcf_action_destroy(struct list_head *actions, int bind);
index def59d3a34d5e24bda47e4526f7050c73a164cd7..0c3ac5acb85f5d3ce0d4cc1520dbe399143975cd 100644 (file)
@@ -158,8 +158,8 @@ struct ipv6_stub {
                                 const struct in6_addr *addr);
        int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
                                 const struct in6_addr *addr);
-       int (*ipv6_dst_lookup)(struct sock *sk, struct dst_entry **dst,
-                               struct flowi6 *fl6);
+       int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
+                              struct dst_entry **dst, struct flowi6 *fl6);
        void (*udpv6_encap_enable)(void);
        void (*ndisc_send_na)(struct net_device *dev, struct neighbour *neigh,
                              const struct in6_addr *daddr,
index 3bd618d3e55dcf8735fb878a1d59bc8cf18bcc2d..2a6b0919e23f71af5f4660fce0a349bfa09b2fd9 100644 (file)
@@ -1297,7 +1297,7 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
        if (max >= to_multiplier * 8)
                return -EINVAL;
 
-       max_latency = (to_multiplier * 8 / max) - 1;
+       max_latency = (to_multiplier * 4 / max) - 1;
        if (latency > 499 || latency > max_latency)
                return -EINVAL;
 
index 2239a37530922682008d9bbe312e6cbfbe97fd4d..c98afc08cc2612e046cd070b22d25aa18a88c457 100644 (file)
@@ -55,6 +55,8 @@
 #define L2CAP_INFO_TIMEOUT             msecs_to_jiffies(4000)
 #define L2CAP_MOVE_TIMEOUT             msecs_to_jiffies(4000)
 #define L2CAP_MOVE_ERTX_TIMEOUT                msecs_to_jiffies(60000)
+#define L2CAP_WAIT_ACK_POLL_PERIOD     msecs_to_jiffies(200)
+#define L2CAP_WAIT_ACK_TIMEOUT         msecs_to_jiffies(10000)
 
 #define L2CAP_A2MP_DEFAULT_MTU         670
 
index c28aca25320ebaa7c02172fbfee4827a5d207d6b..1797235cd590c361eb216d33558b95e441087d3e 100644 (file)
@@ -66,6 +66,7 @@ enum {
        BOND_OPT_AD_ACTOR_SYS_PRIO,
        BOND_OPT_AD_ACTOR_SYSTEM,
        BOND_OPT_AD_USER_PORT_KEY,
+       BOND_OPT_NUM_PEER_NOTIF_ALIAS,
        BOND_OPT_LAST
 };
 
index 290a9a69af0788794619b0ededc4a6ccfbab5e07..382f94b59f2f706eab23f2f6ebe9c2f007c5ed2e 100644 (file)
@@ -34,6 +34,8 @@ struct cfg802154_ops {
                                                           int type);
        void    (*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
                                               struct net_device *dev);
+       int     (*suspend)(struct wpan_phy *wpan_phy);
+       int     (*resume)(struct wpan_phy *wpan_phy);
        int     (*add_virtual_intf)(struct wpan_phy *wpan_phy,
                                    const char *name,
                                    unsigned char name_assign_type,
index c15d39456e146196b24bafed159099baa57e03e9..ccd6d8bffa4d8d0744c70c3591f948d6520b634a 100644 (file)
@@ -49,9 +49,38 @@ static inline void sock_update_classid(struct sock *sk)
        if (classid != sk->sk_classid)
                sk->sk_classid = classid;
 }
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+       u32 classid = task_cls_state(current)->classid;
+
+       /* The classifier must ignore all packets originating from softirq
+        * context, as accessing `current' there would yield false results.
+        *
+        * This test assumes that all callers of dev_queue_xmit() explicitly
+        * disable bh. Knowing this, softirq-based calls can be detected by
+        * looking at the number of nested bh disable calls, because
+        * softirqs always disable bh.
+        */
+       if (in_serving_softirq()) {
+               /* If there is an sk_classid we'll use that. */
+               if (!skb->sk)
+                       return 0;
+
+               classid = skb->sk->sk_classid;
+       }
+
+       return classid;
+}
 #else /* !CONFIG_CGROUP_NET_CLASSID */
 static inline void sock_update_classid(struct sock *sk)
 {
 }
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+       return 0;
+}
 #endif /* CONFIG_CGROUP_NET_CLASSID */
 #endif  /* _NET_CLS_CGROUP_H */
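
A hedged sketch of the classify step this helper supports, in the spirit of
cls_cgroup (the function and its 'want' parameter are illustrative
simplifications):

#include <net/cls_cgroup.h>
#include <net/sch_generic.h>

static int demo_classify(struct sk_buff *skb, u32 want,
			 struct tcf_result *res)
{
	/* task_get_classid() handles the softirq case documented above:
	 * it falls back to skb->sk->sk_classid when `current' is unusable.
	 */
	u32 classid = task_get_classid(skb);

	if (!classid || classid != want)
		return -1;	/* no match */

	res->classid = classid;
	res->class = 0;
	return 0;		/* match */
}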
index 2bc73f8a00a9c4d20848a578eca44b99cf1b7281..2578811cef5167e94269bb9967f6b025c824084a 100644 (file)
@@ -57,6 +57,7 @@ struct dst_entry {
 #define DST_FAKE_RTABLE                0x0040
 #define DST_XFRM_TUNNEL                0x0080
 #define DST_XFRM_QUEUE         0x0100
+#define DST_METADATA           0x0200
 
        unsigned short          pending_confirm;
 
@@ -356,6 +357,9 @@ static inline int dst_discard(struct sk_buff *skb)
 }
 void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
                int initial_obsolete, unsigned short flags);
+void dst_init(struct dst_entry *dst, struct dst_ops *ops,
+             struct net_device *dev, int initial_ref, int initial_obsolete,
+             unsigned short flags);
 void __dst_free(struct dst_entry *dst);
 struct dst_entry *dst_destroy(struct dst_entry *dst);
 
@@ -457,7 +461,7 @@ static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
        return dst;
 }
 
-void dst_init(void);
+void dst_subsys_init(void);
 
 /* Flags for xfrm_lookup flags argument. */
 enum {
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
new file mode 100644 (file)
index 0000000..075f523
--- /dev/null
@@ -0,0 +1,56 @@
+#ifndef __NET_DST_METADATA_H
+#define __NET_DST_METADATA_H 1
+
+#include <linux/skbuff.h>
+#include <net/ip_tunnels.h>
+#include <net/dst.h>
+
+struct metadata_dst {
+       struct dst_entry                dst;
+       size_t                          opts_len;
+       union {
+               struct ip_tunnel_info   tun_info;
+       } u;
+};
+
+static inline struct metadata_dst *skb_metadata_dst(struct sk_buff *skb)
+{
+       struct metadata_dst *md_dst = (struct metadata_dst *) skb_dst(skb);
+
+       if (md_dst && md_dst->dst.flags & DST_METADATA)
+               return md_dst;
+
+       return NULL;
+}
+
+static inline struct ip_tunnel_info *skb_tunnel_info(struct sk_buff *skb,
+                                                    int family)
+{
+       struct metadata_dst *md_dst = skb_metadata_dst(skb);
+       struct rtable *rt;
+
+       if (md_dst)
+               return &md_dst->u.tun_info;
+
+       switch (family) {
+       case AF_INET:
+               rt = (struct rtable *)skb_dst(skb);
+               if (rt && rt->rt_lwtstate)
+                       return lwt_tun_info(rt->rt_lwtstate);
+               break;
+       }
+
+       return NULL;
+}
+
+static inline bool skb_valid_dst(const struct sk_buff *skb)
+{
+       struct dst_entry *dst = skb_dst(skb);
+
+       return dst && !(dst->flags & DST_METADATA);
+}
+
+struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags);
+struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags);
+
+#endif /* __NET_DST_METADATA_H */
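
A hedged sketch of the receive-side pattern this new header enables: a tunnel
driver records the outer headers in a metadata dst and attaches it to the
skb, so later consumers can retrieve it via skb_tunnel_info(). The rx
function is illustrative; the allocation and init helpers are as declared
above and in ip_tunnels.h:

#include <net/dst_metadata.h>
#include <net/ip.h>

static int demo_tunnel_rx(struct sk_buff *skb, __be64 tun_id, __be16 flags)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct metadata_dst *tun_dst;

	tun_dst = metadata_dst_alloc(0, GFP_ATOMIC);	/* no option bytes */
	if (!tun_dst)
		return -ENOMEM;

	/* Copy the outer IP/UDP parameters for upper layers to inspect. */
	ip_tunnel_info_init(&tun_dst->u.tun_info, iph, 0, 0,
			    tun_id, flags, NULL, 0);

	skb_dst_set(skb, &tun_dst->dst);
	return 0;
}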
index 903a55efbffe50a3507f79b1b5e4b49405f31aa8..4e8f804f45898aae228e7b9f60fdda9e3666271d 100644 (file)
@@ -19,6 +19,7 @@ struct fib_rule {
        u8                      action;
        /* 3 bytes hole, try to use */
        u32                     target;
+       __be64                  tun_id;
        struct fib_rule __rcu   *ctarget;
        struct net              *fr_net;
 
index 8109a159d1b3ba5ced3aa6d2bc10b9f01d274520..3098ae33a1784f920e26dd445a041e9deac1888e 100644 (file)
 
 #define LOOPBACK_IFINDEX       1
 
+struct flowi_tunnel {
+       __be64                  tun_id;
+};
+
 struct flowi_common {
        int     flowic_oif;
        int     flowic_iif;
@@ -30,6 +34,7 @@ struct flowi_common {
 #define FLOWI_FLAG_ANYSRC              0x01
 #define FLOWI_FLAG_KNOWN_NH            0x02
        __u32   flowic_secid;
+       struct flowi_tunnel flowic_tun_key;
 };
 
 union flowi_uli {
@@ -66,6 +71,7 @@ struct flowi4 {
 #define flowi4_proto           __fl_common.flowic_proto
 #define flowi4_flags           __fl_common.flowic_flags
 #define flowi4_secid           __fl_common.flowic_secid
+#define flowi4_tun_key         __fl_common.flowic_tun_key
 
        /* (saddr,daddr) must be grouped, same order as in IP header */
        __be32                  saddr;
@@ -95,6 +101,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
        fl4->flowi4_proto = proto;
        fl4->flowi4_flags = flags;
        fl4->flowi4_secid = 0;
+       fl4->flowi4_tun_key.tun_id = 0;
        fl4->daddr = daddr;
        fl4->saddr = saddr;
        fl4->fl4_dport = dport;
@@ -165,6 +172,7 @@ struct flowi {
 #define flowi_proto    u.__fl_common.flowic_proto
 #define flowi_flags    u.__fl_common.flowic_flags
 #define flowi_secid    u.__fl_common.flowic_secid
+#define flowi_tun_key  u.__fl_common.flowic_tun_key
 } __attribute__((__aligned__(BITS_PER_LONG/8)));
 
 static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
index e1300b3dd597b9a68db7b6dc9c03a8ea238b4e4c..53eead2da74324b47ef8c772663f9bcb4fd1d0e1 100644 (file)
@@ -21,13 +21,11 @@ struct netns_frags {
  * @INET_FRAG_FIRST_IN: first fragment has arrived
  * @INET_FRAG_LAST_IN: final fragment has arrived
  * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
- * @INET_FRAG_EVICTED: frag queue is being evicted
  */
 enum {
        INET_FRAG_FIRST_IN      = BIT(0),
        INET_FRAG_LAST_IN       = BIT(1),
        INET_FRAG_COMPLETE      = BIT(2),
-       INET_FRAG_EVICTED       = BIT(3)
 };
 
 /**
@@ -45,6 +43,7 @@ enum {
  * @flags: fragment queue flags
  * @max_size: maximum received fragment size
  * @net: namespace that this frag belongs to
+ * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
  */
 struct inet_frag_queue {
        spinlock_t              lock;
@@ -59,6 +58,7 @@ struct inet_frag_queue {
        __u8                    flags;
        u16                     max_size;
        struct netns_frags      *net;
+       struct hlist_node       list_evictor;
 };
 
 #define INETFRAGS_HASHSZ       1024
@@ -125,6 +125,11 @@ static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f
                inet_frag_destroy(q, f);
 }
 
+static inline bool inet_frag_evicting(struct inet_frag_queue *q)
+{
+       return !hlist_unhashed(&q->list_evictor);
+}
+
 /* Memory Tracking Functions. */
 
 /* The default percpu_counter batch size is not big enough to scale to
@@ -139,14 +144,14 @@ static inline int frag_mem_limit(struct netns_frags *nf)
        return percpu_counter_read(&nf->mem);
 }
 
-static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
+static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
 {
-       __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
+       __percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
 }
 
-static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
+static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
 {
-       __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
+       __percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
 }
 
 static inline void init_frag_mem_limit(struct netns_frags *nf)
index b73c88a19dd408f0de41f87c80242816fac4b19d..b07d126694a7aa5d5910e2d4126522aebd602a98 100644 (file)
@@ -205,8 +205,8 @@ void inet_put_port(struct sock *sk);
 
 void inet_hashinfo_init(struct inet_hashinfo *h);
 
-int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
-int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw);
+void __inet_hash_nolisten(struct sock *sk, struct sock *osk);
+void __inet_hash(struct sock *sk, struct sock *osk);
 void inet_hash(struct sock *sk);
 void inet_unhash(struct sock *sk);
 
index 360c4802288db91a38b435bcf5b5d2eb71a8cd1f..879d6e5a973b4ae1af54d6b0c6103c02ee774991 100644 (file)
@@ -100,10 +100,8 @@ static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
 void inet_twsk_free(struct inet_timewait_sock *tw);
 void inet_twsk_put(struct inet_timewait_sock *tw);
 
-int inet_twsk_unhash(struct inet_timewait_sock *tw);
-
-int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
-                         struct inet_hashinfo *hashinfo);
+void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+                          struct inet_hashinfo *hashinfo);
 
 struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
                                           struct inet_timewait_death_row *dr,
@@ -113,7 +111,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
                           struct inet_hashinfo *hashinfo);
 
 void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo);
-void inet_twsk_deschedule(struct inet_timewait_sock *tw);
+void inet_twsk_deschedule_put(struct inet_timewait_sock *tw);
 
 void inet_twsk_purge(struct inet_hashinfo *hashinfo,
                     struct inet_timewait_death_row *twdr, int family);
index 0750a186ea635678efe15b2619f32e91f86fde99..bee5f3582e38873e8e773e31c8ccda454f249234 100644 (file)
@@ -161,6 +161,7 @@ static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
 }
 
 /* datagram.c */
+int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 
 void ip4_datagram_release_cb(struct sock *sk);
@@ -369,22 +370,6 @@ static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
        flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
 }
 
-static inline void inet_set_txhash(struct sock *sk)
-{
-       struct inet_sock *inet = inet_sk(sk);
-       struct flow_keys keys;
-
-       memset(&keys, 0, sizeof(keys));
-
-       keys.addrs.v4addrs.src = inet->inet_saddr;
-       keys.addrs.v4addrs.dst = inet->inet_daddr;
-       keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
-       keys.ports.src = inet->inet_sport;
-       keys.ports.dst = inet->inet_dport;
-
-       sk->sk_txhash = flow_hash_from_keys(&keys);
-}
-
 static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
 {
        const struct iphdr *iph = skb_gro_network_header(skb);
index 3b76849c190fc2ce79b59d07466a05182d2b99fe..276328e3daa64a0d493a141e5690eb6dfe18c805 100644 (file)
@@ -51,6 +51,8 @@ struct fib6_config {
        struct nlattr   *fc_mp;
 
        struct nl_info  fc_nlinfo;
+       struct nlattr   *fc_encap;
+       u16             fc_encap_type;
 };
 
 struct fib6_node {
@@ -131,6 +133,7 @@ struct rt6_info {
        /* more non-fragment space at head required */
        unsigned short                  rt6i_nfheader_len;
        u8                              rt6i_protocol;
+       struct lwtunnel_state           *rt6i_lwtstate;
 };
 
 static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
index 49c142bdf01e67b55bcf59c2d1e63f6615783ea1..a37d0432bebda440708516c7f270f931764676dd 100644 (file)
@@ -44,7 +44,9 @@ struct fib_config {
        u32                     fc_flow;
        u32                     fc_nlflags;
        struct nl_info          fc_nlinfo;
- };
+       struct nlattr           *fc_encap;
+       u16                     fc_encap_type;
+};
 
 struct fib_info;
 struct rtable;
@@ -89,6 +91,7 @@ struct fib_nh {
        struct rtable __rcu * __percpu *nh_pcpu_rth_output;
        struct rtable __rcu     *nh_rth_input;
        struct fnhe_hash_bucket __rcu *nh_exceptions;
+       struct lwtunnel_state   *nh_lwtstate;
 };
 
 /*
@@ -183,7 +186,6 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
 struct fib_table {
        struct hlist_node       tb_hlist;
        u32                     tb_id;
-       int                     tb_default;
        int                     tb_num_default;
        struct rcu_head         rcu;
        unsigned long           *tb_data;
@@ -290,7 +292,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb);
 int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
                        u8 tos, int oif, struct net_device *dev,
                        struct in_device *idev, u32 *itag);
-void fib_select_default(struct fib_result *res);
+void fib_select_default(const struct flowi4 *flp, struct fib_result *res);
 #ifdef CONFIG_IP_ROUTE_CLASSID
 static inline int fib_num_tclassid_users(struct net *net)
 {
index d8214cb88bbcfa6524a7d1900c543a45a05f7f31..47984415f5d1e7758d6a1c8b0feaeb55f902a7dd 100644 (file)
@@ -9,9 +9,9 @@
 #include <net/dsfield.h>
 #include <net/gro_cells.h>
 #include <net/inet_ecn.h>
-#include <net/ip.h>
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
+#include <net/lwtunnel.h>
 
 #if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
 /* Keep error state on tunnel for 30 sec */
 #define IPTUNNEL_ERR_TIMEO     (30*HZ)
 
+/* Used to memset ip_tunnel padding. */
+#define IP_TUNNEL_KEY_SIZE                                     \
+       (offsetof(struct ip_tunnel_key, tp_dst) +               \
+        FIELD_SIZEOF(struct ip_tunnel_key, tp_dst))
+
+struct ip_tunnel_key {
+       __be64                  tun_id;
+       __be32                  ipv4_src;
+       __be32                  ipv4_dst;
+       __be16                  tun_flags;
+       __u8                    ipv4_tos;
+       __u8                    ipv4_ttl;
+       __be16                  tp_src;
+       __be16                  tp_dst;
+} __packed __aligned(4); /* Minimize padding. */
+
+/* Indicates whether the tunnel info structure represents receive
+ * or transmit tunnel parameters.
+ */
+enum {
+       IP_TUNNEL_INFO_RX,
+       IP_TUNNEL_INFO_TX,
+};
+
+struct ip_tunnel_info {
+       struct ip_tunnel_key    key;
+       const void              *options;
+       u8                      options_len;
+       u8                      mode;
+};
+
 /* 6rd prefix/relay information */
 #ifdef CONFIG_IPV6_SIT_6RD
 struct ip_tunnel_6rd_parm {
@@ -136,6 +167,47 @@ int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
 int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
                            unsigned int num);
 
+static inline void __ip_tunnel_info_init(struct ip_tunnel_info *tun_info,
+                                        __be32 saddr, __be32 daddr,
+                                        u8 tos, u8 ttl,
+                                        __be16 tp_src, __be16 tp_dst,
+                                        __be64 tun_id, __be16 tun_flags,
+                                        const void *opts, u8 opts_len)
+{
+       tun_info->key.tun_id = tun_id;
+       tun_info->key.ipv4_src = saddr;
+       tun_info->key.ipv4_dst = daddr;
+       tun_info->key.ipv4_tos = tos;
+       tun_info->key.ipv4_ttl = ttl;
+       tun_info->key.tun_flags = tun_flags;
+
+       /* For the tunnel types on top of IPsec, the tp_src and tp_dst of
+        * the upper tunnel are used.
+        * E.g.: for GRE over IPsec, the tp_src and tp_dst are zero.
+        */
+       tun_info->key.tp_src = tp_src;
+       tun_info->key.tp_dst = tp_dst;
+
+       /* Clear struct padding. */
+       if (sizeof(tun_info->key) != IP_TUNNEL_KEY_SIZE)
+               memset((unsigned char *)&tun_info->key + IP_TUNNEL_KEY_SIZE,
+                      0, sizeof(tun_info->key) - IP_TUNNEL_KEY_SIZE);
+
+       tun_info->options = opts;
+       tun_info->options_len = opts_len;
+}
+
+static inline void ip_tunnel_info_init(struct ip_tunnel_info *tun_info,
+                                      const struct iphdr *iph,
+                                      __be16 tp_src, __be16 tp_dst,
+                                      __be64 tun_id, __be16 tun_flags,
+                                      const void *opts, u8 opts_len)
+{
+       __ip_tunnel_info_init(tun_info, iph->saddr, iph->daddr,
+                             iph->tos, iph->ttl, tp_src, tp_dst,
+                             tun_id, tun_flags, opts, opts_len);
+}
+
 #ifdef CONFIG_INET
 
 int ip_tunnel_init(struct net_device *dev);
@@ -221,6 +293,44 @@ static inline void iptunnel_xmit_stats(int err,
        }
 }
 
+static inline void *ip_tunnel_info_opts(struct ip_tunnel_info *info, size_t n)
+{
+       return info + 1;
+}
+
+static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
+{
+       return (struct ip_tunnel_info *)lwtstate->data;
+}
+
+extern struct static_key ip_tunnel_metadata_cnt;
+
+/* Returns > 0 if metadata should be collected */
+static inline int ip_tunnel_collect_metadata(void)
+{
+       return static_key_false(&ip_tunnel_metadata_cnt);
+}
+
+void __init ip_tunnel_core_init(void);
+
+void ip_tunnel_need_metadata(void);
+void ip_tunnel_unneed_metadata(void);
+
+#else /* CONFIG_INET */
+
+static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
+{
+       return NULL;
+}
+
+static inline void ip_tunnel_need_metadata(void)
+{
+}
+
+static inline void ip_tunnel_unneed_metadata(void)
+{
+}
+
 #endif /* CONFIG_INET */
 
 #endif /* __NET_IP_TUNNELS_H */
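
The metadata static key above keeps the common case free: collection stays a
patched-out branch in the fast path until some consumer asks for it. A
hedged sketch of the consumer lifecycle (demo_* names hypothetical):

#include <net/ip_tunnels.h>

static void demo_listener_start(void)
{
	ip_tunnel_need_metadata();	/* enables the static key */
}

static void demo_listener_stop(void)
{
	ip_tunnel_unneed_metadata();
}

/* In a tunnel receive path: */
static bool demo_should_collect(void)
{
	return ip_tunnel_collect_metadata() > 0;
}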
index 82dbdb092a5d1c43d088fea8055c1bcafee156c5..711cca428cc8cd56b40de704265ad73262a6d2a4 100644 (file)
@@ -707,54 +707,69 @@ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
-static inline void ip6_set_txhash(struct sock *sk)
-{
-       struct inet_sock *inet = inet_sk(sk);
-       struct ipv6_pinfo *np = inet6_sk(sk);
-       struct flow_keys keys;
 
-       memset(&keys, 0, sizeof(keys));
+/* Sysctl settings for net.ipv6.auto_flowlabels */
+#define IP6_AUTO_FLOW_LABEL_OFF                0
+#define IP6_AUTO_FLOW_LABEL_OPTOUT     1
+#define IP6_AUTO_FLOW_LABEL_OPTIN      2
+#define IP6_AUTO_FLOW_LABEL_FORCED     3
 
-       memcpy(&keys.addrs.v6addrs.src, &np->saddr,
-              sizeof(keys.addrs.v6addrs.src));
-       memcpy(&keys.addrs.v6addrs.dst, &sk->sk_v6_daddr,
-              sizeof(keys.addrs.v6addrs.dst));
-       keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
-       keys.ports.src = inet->inet_sport;
-       keys.ports.dst = inet->inet_dport;
+#define IP6_AUTO_FLOW_LABEL_MAX                IP6_AUTO_FLOW_LABEL_FORCED
 
-       sk->sk_txhash = flow_hash_from_keys(&keys);
-}
+#define IP6_DEFAULT_AUTO_FLOW_LABELS   IP6_AUTO_FLOW_LABEL_OPTOUT
 
 static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
-                                       __be32 flowlabel, bool autolabel)
+                                       __be32 flowlabel, bool autolabel,
+                                       struct flowi6 *fl6)
 {
-       if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) {
-               u32 hash;
+       u32 hash;
+
+       if (flowlabel ||
+           net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
+           (!autolabel &&
+            net->ipv6.sysctl.auto_flowlabels != IP6_AUTO_FLOW_LABEL_FORCED))
+               return flowlabel;
 
-               hash = skb_get_hash(skb);
+       hash = skb_get_hash_flowi6(skb, fl6);
 
-               /* Since this is being sent on the wire obfuscate hash a bit
-                * to minimize possbility that any useful information to an
-                * attacker is leaked. Only lower 20 bits are relevant.
-                */
-               hash ^= hash >> 12;
+       /* Since this is being sent on the wire, obfuscate the hash a bit
+        * to minimize the possibility that any useful information is
+        * leaked to an attacker. Only the lower 20 bits are relevant.
+        */
+       hash = rol32(hash, 16);
 
-               flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
+       flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
 
-               if (net->ipv6.sysctl.flowlabel_state_ranges)
-                       flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
-       }
+       if (net->ipv6.sysctl.flowlabel_state_ranges)
+               flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
 
        return flowlabel;
 }
+
+static inline int ip6_default_np_autolabel(struct net *net)
+{
+       switch (net->ipv6.sysctl.auto_flowlabels) {
+       case IP6_AUTO_FLOW_LABEL_OFF:
+       case IP6_AUTO_FLOW_LABEL_OPTIN:
+       default:
+               return 0;
+       case IP6_AUTO_FLOW_LABEL_OPTOUT:
+       case IP6_AUTO_FLOW_LABEL_FORCED:
+               return 1;
+       }
+}
 #else
 static inline void ip6_set_txhash(struct sock *sk) { }
 static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
-                                       __be32 flowlabel, bool autolabel)
+                                       __be32 flowlabel, bool autolabel,
+                                       struct flowi6 *fl6)
 {
        return flowlabel;
 }
+static inline int ip6_default_np_autolabel(struct net *net)
+{
+       return 0;
+}
 #endif
 
 
@@ -832,7 +847,8 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
                              &inet6_sk(sk)->cork);
 }
 
-int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6);
+int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
+                  struct flowi6 *fl6);
 struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
                                      const struct in6_addr *final_dst);
 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
new file mode 100644 (file)
index 0000000..33bd309
--- /dev/null
@@ -0,0 +1,147 @@
+#ifndef __NET_LWTUNNEL_H
+#define __NET_LWTUNNEL_H 1
+
+#include <linux/lwtunnel.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <net/route.h>
+
+#define LWTUNNEL_HASH_BITS   7
+#define LWTUNNEL_HASH_SIZE   (1 << LWTUNNEL_HASH_BITS)
+
+/* lw tunnel state flags */
+#define LWTUNNEL_STATE_OUTPUT_REDIRECT 0x1
+
+struct lwtunnel_state {
+       __u16           type;
+       __u16           flags;
+       atomic_t        refcnt;
+       int             len;
+       __u8            data[0];
+};
+
+struct lwtunnel_encap_ops {
+       int (*build_state)(struct net_device *dev, struct nlattr *encap,
+                          struct lwtunnel_state **ts);
+       int (*output)(struct sock *sk, struct sk_buff *skb);
+       int (*fill_encap)(struct sk_buff *skb,
+                         struct lwtunnel_state *lwtstate);
+       int (*get_encap_size)(struct lwtunnel_state *lwtstate);
+       int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
+};
+
+#ifdef CONFIG_LWTUNNEL
+static inline struct lwtunnel_state *
+lwtstate_get(struct lwtunnel_state *lws)
+{
+       if (lws)
+               atomic_inc(&lws->refcnt);
+
+       return lws;
+}
+
+static inline void lwtstate_put(struct lwtunnel_state *lws)
+{
+       if (!lws)
+               return;
+
+       if (atomic_dec_and_test(&lws->refcnt))
+               kfree(lws);
+}
+
+static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
+{
+       if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_OUTPUT_REDIRECT))
+               return true;
+
+       return false;
+}
+
+int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
+                          unsigned int num);
+int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
+                          unsigned int num);
+int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+                        struct nlattr *encap,
+                        struct lwtunnel_state **lws);
+int lwtunnel_fill_encap(struct sk_buff *skb,
+                       struct lwtunnel_state *lwtstate);
+int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate);
+struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len);
+int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b);
+int lwtunnel_output(struct sock *sk, struct sk_buff *skb);
+int lwtunnel_output6(struct sock *sk, struct sk_buff *skb);
+
+#else
+
+static inline struct lwtunnel_state *
+lwtstate_get(struct lwtunnel_state *lws)
+{
+       return lws;
+}
+
+static inline void lwtstate_put(struct lwtunnel_state *lws)
+{
+}
+
+static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
+{
+       return false;
+}
+
+static inline int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
+                                        unsigned int num)
+{
+       return -EOPNOTSUPP;
+
+}
+
+static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
+                                        unsigned int num)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+                                      struct nlattr *encap,
+                                      struct lwtunnel_state **lws)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_fill_encap(struct sk_buff *skb,
+                                     struct lwtunnel_state *lwtstate)
+{
+       return 0;
+}
+
+static inline int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate)
+{
+       return 0;
+}
+
+static inline struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len)
+{
+       return NULL;
+}
+
+static inline int lwtunnel_cmp_encap(struct lwtunnel_state *a,
+                                    struct lwtunnel_state *b)
+{
+       return 0;
+}
+
+static inline int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_output6(struct sock *sk, struct sk_buff *skb)
+{
+       return -EOPNOTSUPP;
+}
+
+#endif
+
+#endif /* __NET_LWTUNNEL_H */
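
A hedged sketch of plugging an encapsulation into the new light-weight tunnel
core: implement build_state/output and register the ops under an encap type.
The demo_* names are hypothetical, and LWTUNNEL_ENCAP_MPLS is assumed to be
the matching constant from the uapi header:

#include <net/lwtunnel.h>

static int demo_build_state(struct net_device *dev, struct nlattr *encap,
			    struct lwtunnel_state **ts)
{
	struct lwtunnel_state *state;

	state = lwtunnel_state_alloc(sizeof(u32));	/* room for our data */
	if (!state)
		return -ENOMEM;

	state->type = LWTUNNEL_ENCAP_MPLS;
	/* parse the 'encap' netlink attribute into state->data here */
	*ts = state;
	return 0;
}

static int demo_output(struct sock *sk, struct sk_buff *skb)
{
	/* push the encap header here, then hand the skb to the
	 * lower device's output path
	 */
	return 0;
}

static const struct lwtunnel_encap_ops demo_encap_ops = {
	.build_state	= demo_build_state,
	.output		= demo_output,
};

static int __init demo_lwt_init(void)
{
	return lwtunnel_encap_add_ops(&demo_encap_ops, LWTUNNEL_ENCAP_MPLS);
}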
index f534a46911dc3967e78700f30f8e7e47ca9acec8..b7f99615224bd05d7e4cb926aca27f14f44aab81 100644 (file)
@@ -320,23 +320,6 @@ int ieee802154_register_hw(struct ieee802154_hw *hw);
  */
 void ieee802154_unregister_hw(struct ieee802154_hw *hw);
 
-/**
- * ieee802154_rx - receive frame
- *
- * Use this function to hand received frames to mac802154. The receive
- * buffer in @skb must start with an IEEE 802.15.4 header. In case of a
- * paged @skb is used, the driver is recommended to put the ieee802154
- * header of the frame on the linear part of the @skb to avoid memory
- * allocation and/or memcpy by the stack.
- *
- * This function may not be called in IRQ context. Calls to this function
- * for a single hardware must be synchronized against each other.
- *
- * @hw: the hardware this frame came in on
- * @skb: the buffer to receive, owned by mac802154 after this call
- */
-void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb);
-
 /**
  * ieee802154_rx_irqsafe - receive frame
  *
diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h
new file mode 100644 (file)
index 0000000..4757997
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2015 Cumulus Networks, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _NET_MPLS_IPTUNNEL_H
+#define _NET_MPLS_IPTUNNEL_H 1
+
+#define MAX_NEW_LABELS 2
+
+struct mpls_iptunnel_encap {
+       u32     label[MAX_NEW_LABELS];
+       u32     labels;
+};
+
+static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate)
+{
+       return (struct mpls_iptunnel_encap *)lwtstate->data;
+}
+
+#endif
index 095433b8a8b03dec4b99057dd7165c9dc6d7846f..37cd3911d5c59e97fe6328a2852ea17040f4dbc3 100644 (file)
@@ -291,7 +291,7 @@ extern unsigned int nf_conntrack_max;
 extern unsigned int nf_conntrack_hash_rnd;
 void init_nf_conntrack_hash_rnd(void);
 
-void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl);
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags);
 
 #define NF_CT_STAT_INC(net, count)       __this_cpu_inc((net)->ct.stat->count)
 #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
index 29d6a94db54d6136b6b380d5817341d2bbe83bce..723b61c82b3f444aee0e7591b8028cb2548e228e 100644 (file)
@@ -68,7 +68,6 @@ struct ct_pcpu {
        spinlock_t              lock;
        struct hlist_nulls_head unconfirmed;
        struct hlist_nulls_head dying;
-       struct hlist_nulls_head tmpl;
 };
 
 struct netns_ct {
index 8d93544a2d2b5f21c7ed8b8137394df0758dbef3..c0368db6df54d78a1122c45aff412bcafa84b412 100644 (file)
@@ -31,6 +31,7 @@ struct netns_sysctl_ipv6 {
        int auto_flowlabels;
        int icmpv6_time;
        int anycast_src_echo_reply;
+       int ip_nonlocal_bind;
        int fwmark_reflect;
        int idgen_retries;
        int idgen_delay;
index 532e4ba64f49a8ef8b8c52a9bd312acdf2011943..38aa4983e2a90bd5a391e844e0556591e5d85f60 100644 (file)
@@ -14,5 +14,6 @@ struct netns_nf {
 #ifdef CONFIG_SYSCTL
        struct ctl_table_header *nf_log_dir_header;
 #endif
+       struct list_head hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 };
 #endif
index fe22d03afb6a218b6b2dfa7d5329632b8d4936ae..2d45f419477fedadeef42818e7159c148dfd6b84 100644 (file)
@@ -66,6 +66,7 @@ struct rtable {
 
        struct list_head        rt_uncached;
        struct uncached_list    *rt_uncached_list;
+       struct lwtunnel_state   *rt_lwtstate;
 };
 
 static inline bool rt_is_input_route(const struct rtable *rt)
index 343d922d15c2ce0ce1a53dd55d7c006a42e15c99..18fdb98185ab36ae750a2d77078ea16ac87dd0e6 100644 (file)
@@ -141,6 +141,7 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
                                    unsigned char name_assign_type,
                                    const struct rtnl_link_ops *ops,
                                    struct nlattr *tb[]);
+int rtnl_delete_link(struct net_device *dev);
 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
 
 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len);
index 2738f6f8790836b1b88d5163e5ba297b0f4421c0..2eab08c38e3283efd696bfff4198a16ad27c1d16 100644 (file)
@@ -513,17 +513,20 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
        bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
 }
 
-static inline void qdisc_bstats_update_cpu(struct Qdisc *sch,
-                                          const struct sk_buff *skb)
+static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
+                                    const struct sk_buff *skb)
 {
-       struct gnet_stats_basic_cpu *bstats =
-                               this_cpu_ptr(sch->cpu_bstats);
-
        u64_stats_update_begin(&bstats->syncp);
        bstats_update(&bstats->bstats, skb);
        u64_stats_update_end(&bstats->syncp);
 }
 
+static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
+                                          const struct sk_buff *skb)
+{
+       bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
+}
+
 static inline void qdisc_bstats_update(struct Qdisc *sch,
                                       const struct sk_buff *skb)
 {
@@ -547,16 +550,24 @@ static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
        sch->qstats.drops += count;
 }
 
-static inline void qdisc_qstats_drop(struct Qdisc *sch)
+static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
 {
-       sch->qstats.drops++;
+       qstats->drops++;
 }
 
-static inline void qdisc_qstats_drop_cpu(struct Qdisc *sch)
+static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
 {
-       struct gnet_stats_queue *qstats = this_cpu_ptr(sch->cpu_qstats);
+       qstats->overlimits++;
+}
 
-       qstats->drops++;
+static inline void qdisc_qstats_drop(struct Qdisc *sch)
+{
+       qstats_drop_inc(&sch->qstats);
+}
+
+static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
+{
+       qstats_drop_inc(this_cpu_ptr(sch->cpu_qstats));
 }
 
 static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
index 05a8c1aea25187c1692efcb1e350bb2c7f75a30b..43c6abcf06abc0a5bf0d56bb9235e6800a1fc111 100644 (file)
@@ -429,7 +429,9 @@ struct sock {
        void                    *sk_security;
 #endif
        __u32                   sk_mark;
+#ifdef CONFIG_CGROUP_NET_CLASSID
        u32                     sk_classid;
+#endif
        struct cg_proto         *sk_cgrp;
        void                    (*sk_state_change)(struct sock *sk);
        void                    (*sk_data_ready)(struct sock *sk);
@@ -902,7 +904,7 @@ void sk_stream_kill_queues(struct sock *sk);
 void sk_set_memalloc(struct sock *sk);
 void sk_clear_memalloc(struct sock *sk);
 
-int sk_wait_data(struct sock *sk, long *timeo);
+int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
 
 struct request_sock_ops;
 struct timewait_sock_ops;
@@ -1685,6 +1687,20 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
 kuid_t sock_i_uid(struct sock *sk);
 unsigned long sock_i_ino(struct sock *sk);
 
+static inline void sk_set_txhash(struct sock *sk)
+{
+       sk->sk_txhash = prandom_u32();
+
+       if (unlikely(!sk->sk_txhash))
+               sk->sk_txhash = 1;
+}
+
+static inline void sk_rethink_txhash(struct sock *sk)
+{
+       if (sk->sk_txhash)
+               sk_set_txhash(sk);
+}
+
 static inline struct dst_entry *
 __sk_dst_get(struct sock *sk)
 {
@@ -1709,6 +1725,8 @@ static inline void dst_negative_advice(struct sock *sk)
 {
        struct dst_entry *ndst, *dst = __sk_dst_get(sk);
 
+       sk_rethink_txhash(sk);
+
        if (dst && dst->ops->negative_advice) {
                ndst = dst->ops->negative_advice(dst);
 
index d5671f118bfc54566a34d48708e35b69af19b417..89da8934519bb720c4d5f1f479040a889e6079fd 100644 (file)
@@ -157,6 +157,9 @@ int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
 int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                            struct net_device *dev,
                            struct net_device *filter_dev, int idx);
+void switchdev_port_fwd_mark_set(struct net_device *dev,
+                                struct net_device *group_dev,
+                                bool joining);
 
 #else
 
@@ -271,6 +274,12 @@ static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
        return -EOPNOTSUPP;
 }
 
+static inline void switchdev_port_fwd_mark_set(struct net_device *dev,
+                                              struct net_device *group_dev,
+                                              bool joining)
+{
+}
+
 #endif
 
 #endif /* _LINUX_SWITCHDEV_H_ */
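
A hedged sketch tying this helper to the offload_fwd_mark fields added to
net_device and sk_buff earlier in this diff: the driver stamps ingress
packets with its port's mark so software forwarding can skip ports the ASIC
already handled, and keeps the group marks in sync on bridge join/leave
(driver names hypothetical; assumes CONFIG_NET_SWITCHDEV):

#include <linux/netdevice.h>
#include <net/switchdev.h>

static void demo_port_rx(struct net_device *dev, struct sk_buff *skb)
{
	/* Hardware already flooded this frame inside the bridge group;
	 * the matching mark tells the bridge not to forward it again.
	 */
	skb->offload_fwd_mark = dev->offload_fwd_mark;
	netif_receive_skb(skb);
}

static void demo_port_bridge_join(struct net_device *dev,
				  struct net_device *br)
{
	switchdev_port_fwd_mark_set(dev, br, true);
}

static void demo_port_bridge_leave(struct net_device *dev,
				   struct net_device *br)
{
	switchdev_port_fwd_mark_set(dev, br, false);
}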
index 9fc9b578908ab868dcc0ef5986358ccb603bf5f9..592a6bc02b0b535087e9f2afee844b5ea37c666b 100644 (file)
@@ -6,9 +6,10 @@
 struct tcf_gact {
        struct tcf_common       common;
 #ifdef CONFIG_GACT_PROB
-        u16                    tcfg_ptype;
-        u16                    tcfg_pval;
-        int                    tcfg_paction;
+       u16                     tcfg_ptype;
+       u16                     tcfg_pval;
+       int                     tcfg_paction;
+       atomic_t                packets;
 #endif
 };
 #define to_gact(a) \
index 4dd77a1c106b246b0abc9d8af3d6dc67fa748b5c..dae96bae1c19c2d71fa7c0ea65e74d064e3b3757 100644 (file)
@@ -8,7 +8,7 @@ struct tcf_mirred {
        int                     tcfm_eaction;
        int                     tcfm_ifindex;
        int                     tcfm_ok_push;
-       struct net_device       *tcfm_dev;
+       struct net_device __rcu *tcfm_dev;
        struct list_head        tcfm_list;
 };
 #define to_mirred(a) \
index 950cfecaad3c0d01c646c4fd111eca8d0cf8aef3..364426a2be5a0f7f0a2e6daaf6ce9b9a2f3e3304 100644 (file)
@@ -989,6 +989,11 @@ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
 
 #define TCP_INFINITE_SSTHRESH  0x7fffffff
 
+static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
+{
+       return tp->snd_cwnd < tp->snd_ssthresh;
+}
+
 static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
 {
        return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
@@ -1065,7 +1070,7 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
        const struct tcp_sock *tp = tcp_sk(sk);
 
        /* If in slow start, ensure cwnd grows to twice what was ACKed. */
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
+       if (tcp_in_slow_start(tp))
                return tp->snd_cwnd < 2 * tp->max_packets_out;
 
        return tp->is_cwnd_limited;
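
Note the helper also tightens the comparison from <= to <, so cwnd == ssthresh now counts as congestion avoidance. A typical congestion-avoidance callback written against it, in the style of Reno (a sketch only; the real implementations live in tcp_cong.c and the CC modules):

    static void toy_cong_avoid(struct sock *sk, u32 ack, u32 acked)
    {
            struct tcp_sock *tp = tcp_sk(sk);

            if (!tcp_is_cwnd_limited(sk))
                    return;

            if (tcp_in_slow_start(tp)) {
                    /* exponential growth, capped at ssthresh */
                    acked = tcp_slow_start(tp, acked);
                    if (!acked)
                            return;
            }
            /* additive increase: one segment per cwnd of data ACKed */
            tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
    }
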
index 68f0ecad6c6e211e8f6dac90214fff93c539eb0f..1a47946f95ba46a9ad9c2de262c9638f6a6776f4 100644 (file)
@@ -33,9 +33,6 @@ static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
 
 static inline void twsk_destructor(struct sock *sk)
 {
-       BUG_ON(sk == NULL);
-       BUG_ON(sk->sk_prot == NULL);
-       BUG_ON(sk->sk_prot->twsk_prot == NULL);
        if (sk->sk_prot->twsk_prot->twsk_destructor != NULL)
                sk->sk_prot->twsk_prot->twsk_destructor(sk);
 }
index 0082b5d33d7d3f2ea66fe94c26b8c3572188affc..eb8d721cdb676af4a5842e810fdd447f0536691d 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/udp.h>
+#include <net/dst_metadata.h>
 
 #define VNI_HASH_BITS  10
 #define VNI_HASH_SIZE  (1<<VNI_HASH_BITS)
@@ -94,20 +95,18 @@ struct vxlanhdr {
 #define VXLAN_VNI_MASK  (VXLAN_VID_MASK << 8)
 #define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
 
+#define VNI_HASH_BITS  10
+#define VNI_HASH_SIZE  (1<<VNI_HASH_BITS)
+#define FDB_HASH_BITS  8
+#define FDB_HASH_SIZE  (1<<FDB_HASH_BITS)
+
 struct vxlan_metadata {
-       __be32          vni;
        u32             gbp;
 };
 
-struct vxlan_sock;
-typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb,
-                          struct vxlan_metadata *md);
-
 /* per UDP socket information */
 struct vxlan_sock {
        struct hlist_node hlist;
-       vxlan_rcv_t      *rcv;
-       void             *data;
        struct work_struct del_work;
        struct socket    *sock;
        struct rcu_head   rcu;
@@ -117,6 +116,57 @@ struct vxlan_sock {
        u32               flags;
 };
 
+union vxlan_addr {
+       struct sockaddr_in sin;
+       struct sockaddr_in6 sin6;
+       struct sockaddr sa;
+};
+
+struct vxlan_rdst {
+       union vxlan_addr         remote_ip;
+       __be16                   remote_port;
+       u32                      remote_vni;
+       u32                      remote_ifindex;
+       struct list_head         list;
+       struct rcu_head          rcu;
+};
+
+struct vxlan_config {
+       union vxlan_addr        remote_ip;
+       union vxlan_addr        saddr;
+       u32                     vni;
+       int                     remote_ifindex;
+       int                     mtu;
+       __be16                  dst_port;
+       __u16                   port_min;
+       __u16                   port_max;
+       __u8                    tos;
+       __u8                    ttl;
+       u32                     flags;
+       unsigned long           age_interval;
+       unsigned int            addrmax;
+       bool                    no_share;
+};
+
+/* Pseudo network device */
+struct vxlan_dev {
+       struct hlist_node hlist;        /* vni hash table */
+       struct list_head  next;         /* vxlan's per namespace list */
+       struct vxlan_sock *vn_sock;     /* listening socket */
+       struct net_device *dev;
+       struct net        *net;         /* netns for packet i/o */
+       struct vxlan_rdst default_dst;  /* default destination */
+       u32               flags;        /* VXLAN_F_* in vxlan.h */
+
+       struct timer_list age_timer;
+       spinlock_t        hash_lock;
+       unsigned int      addrcnt;
+
+       struct vxlan_config     cfg;
+
+       struct hlist_head fdb_head[FDB_HASH_SIZE];
+};
+
 #define VXLAN_F_LEARN                  0x01
 #define VXLAN_F_PROXY                  0x02
 #define VXLAN_F_RSC                    0x04
@@ -130,6 +180,8 @@ struct vxlan_sock {
 #define VXLAN_F_REMCSUM_RX             0x400
 #define VXLAN_F_GBP                    0x800
 #define VXLAN_F_REMCSUM_NOPARTIAL      0x1000
+#define VXLAN_F_COLLECT_METADATA       0x2000
+#define VXLAN_F_FLOW_BASED             0x4000
 
 /* Flags that are used in the receive path. These flags must match in
  * order for a socket to be shareable
@@ -137,18 +189,17 @@ struct vxlan_sock {
 #define VXLAN_F_RCV_FLAGS              (VXLAN_F_GBP |                  \
                                         VXLAN_F_UDP_ZERO_CSUM6_RX |    \
                                         VXLAN_F_REMCSUM_RX |           \
-                                        VXLAN_F_REMCSUM_NOPARTIAL)
-
-struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
-                                 vxlan_rcv_t *rcv, void *data,
-                                 bool no_share, u32 flags);
+                                        VXLAN_F_REMCSUM_NOPARTIAL |    \
+                                        VXLAN_F_COLLECT_METADATA |     \
+                                        VXLAN_F_FLOW_BASED)
 
-void vxlan_sock_release(struct vxlan_sock *vs);
+struct net_device *vxlan_dev_create(struct net *net, const char *name,
+                                   u8 name_assign_type, struct vxlan_config *conf);
 
-int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
-                  __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
-                  __be16 src_port, __be16 dst_port, struct vxlan_metadata *md,
-                  bool xnet, u32 vxflags);
+static inline __be16 vxlan_dev_dst_port(struct vxlan_dev *vxlan)
+{
+       return inet_sk(vxlan->vn_sock->sock->sk)->inet_sport;
+}
 
 static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
                                                     netdev_features_t features)
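
The old vxlan_sock_add()/vxlan_xmit_skb() callback API is gone: a consumer now describes the device it wants in struct vxlan_config and lets the driver own the socket. A hedged sketch of a caller creating a metadata-mode device (roughly the shape OVS was converted to; the device name is illustrative):

    struct vxlan_config conf = {
            .dst_port = htons(4789),
            .flags    = VXLAN_F_COLLECT_METADATA,
    };
    struct net_device *dev;

    dev = vxlan_dev_create(net, "vxlan0", NET_NAME_USER, &conf);
    if (IS_ERR(dev))
            return PTR_ERR(dev);
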
index 986fddb085796035a44c69e48779ec84393415f8..b0f898e3b2e733307100cd4cf80159bc88972b6d 100644 (file)
@@ -1745,6 +1745,7 @@ struct ib_device {
        char                         node_desc[64];
        __be64                       node_guid;
        u32                          local_dma_lkey;
+       u16                          is_switch:1;
        u8                           node_type;
        u8                           phys_port_cnt;
 
@@ -1823,6 +1824,20 @@ int ib_query_port(struct ib_device *device,
 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
                                               u8 port_num);
 
+/**
+ * rdma_cap_ib_switch - Check if the device is an IB switch
+ * @device: Device to check
+ *
+ * The device driver is responsible for setting the is_switch bit in
+ * the ib_device structure at init time.
+ *
+ * Return: true if the device is an IB switch.
+ */
+static inline bool rdma_cap_ib_switch(const struct ib_device *device)
+{
+       return device->is_switch;
+}
+
 /**
  * rdma_start_port - Return the first valid port number for the device
  * specified
@@ -1833,7 +1848,7 @@ enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
  */
 static inline u8 rdma_start_port(const struct ib_device *device)
 {
-       return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
+       return rdma_cap_ib_switch(device) ? 0 : 1;
 }
 
 /**
@@ -1846,8 +1861,7 @@ static inline u8 rdma_start_port(const struct ib_device *device)
  */
 static inline u8 rdma_end_port(const struct ib_device *device)
 {
-       return (device->node_type == RDMA_NODE_IB_SWITCH) ?
-               0 : device->phys_port_cnt;
+       return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
 }
 
 static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
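
With the quirk (IB switches expose only the management port 0) hidden behind rdma_cap_ib_switch(), the usual port-iteration idiom keeps working unchanged for both switches and HCAs (sketch):

    u8 port;

    /* switches iterate exactly once over port 0; HCAs over 1..phys_port_cnt */
    for (port = rdma_start_port(device); port <= rdma_end_port(device); port++) {
            if (!rdma_protocol_ib(device, port))
                    continue;
            /* ... per-port setup ... */
    }
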
index cdb05dd1d4401134fcef914cefaa1338eb629437..d40d3ef25707bd7979a36d9e6e094a394d9f915e 100644 (file)
@@ -119,6 +119,7 @@ extern struct srp_rport *srp_rport_add(struct Scsi_Host *,
 extern void srp_rport_del(struct srp_rport *);
 extern int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo,
                         int dev_loss_tmo);
+int srp_parse_tmo(int *tmo, const char *buf);
 extern int srp_reconnect_rport(struct srp_rport *rport);
 extern void srp_start_tl_fail_timers(struct srp_rport *rport);
 extern void srp_remove_host(struct Scsi_Host *);
index 34117b8b72e49d84fb9477326ad10a490de1060a..0aedbb2c10e0451c162118988d6c070efcd9b629 100644 (file)
@@ -595,6 +595,7 @@ struct iscsi_conn {
        int                     bitmap_id;
        int                     rx_thread_active;
        struct task_struct      *rx_thread;
+       struct completion       rx_login_comp;
        int                     tx_thread_active;
        struct task_struct      *tx_thread;
        /* list_head for session connection list */
index b6fce900a8334613f45f169a0c28802327222f56..fbdd11851725ffa77435b0d3f66cfdfd4f7bb5fb 100644 (file)
@@ -32,7 +32,7 @@
 #ifndef __AMDGPU_DRM_H__
 #define __AMDGPU_DRM_H__
 
-#include <drm/drm.h>
+#include "drm.h"
 
 #define DRM_AMDGPU_GEM_CREATE          0x00
 #define DRM_AMDGPU_GEM_MMAP            0x01
@@ -614,6 +614,8 @@ struct drm_amdgpu_info_device {
        uint32_t vram_type;
        /** video memory bit width*/
        uint32_t vram_bit_width;
+       /* vce harvesting instance */
+       uint32_t vce_harvest_config;
 };
 
 struct drm_amdgpu_info_hw_ip {
index 6e1a2ed116cb1410958f61631a7dc30839652738..db809b722985d3a1a03480cc1e3d36a656af2d2d 100644 (file)
@@ -1070,6 +1070,14 @@ struct drm_i915_reg_read {
        __u64 offset;
        __u64 val; /* Return value */
 };
+/* Known registers:
+ *
+ * Render engine timestamp - 0x2358 + 64bit - gen7+
+ * - Note this register returns an invalid value if read with the default
+ *   single-instruction 8-byte read; to work around that, use
+ *   offset (0x2358 | 1) instead.
+ *
+ */
 
 struct drm_i915_reset_stats {
        __u32 ctx_id;
index 1ef76661e1a1b5cc7a1d1021aefd2501f8e5375e..01aa2a8e3f8de1bc368ab53f3f5db472f0329fa7 100644 (file)
@@ -33,7 +33,7 @@
 #ifndef __RADEON_DRM_H__
 #define __RADEON_DRM_H__
 
-#include <drm/drm.h>
+#include "drm.h"
 
 /* WARNING: If you change any of these defines, make sure to change the
  * defines in the X server file (radeon_sarea.h)
index 1ff9942718fee60ba3ef4dede12499e2d49a6fd9..aafb9937b162b47ce047e6becb180b7cd6d3c447 100644 (file)
@@ -243,6 +243,7 @@ header-y += limits.h
 header-y += llc.h
 header-y += loop.h
 header-y += lp.h
+header-y += lwtunnel.h
 header-y += magic.h
 header-y += major.h
 header-y += map_to_7segment.h
index 29ef6f99e43d1d46586fb308c15b0c65bfc33e17..2ce13c109b006a5504946d785adaabe563a57a5e 100644 (file)
@@ -249,6 +249,27 @@ enum bpf_func_id {
         * Return: 0 on success
         */
        BPF_FUNC_get_current_comm,
+
+       /**
+        * bpf_get_cgroup_classid(skb) - retrieve a proc's classid
+        * @skb: pointer to skb
+        * Return: classid if != 0
+        */
+       BPF_FUNC_get_cgroup_classid,
+       BPF_FUNC_skb_vlan_push, /* bpf_skb_vlan_push(skb, vlan_proto, vlan_tci) */
+       BPF_FUNC_skb_vlan_pop,  /* bpf_skb_vlan_pop(skb) */
+
+       /**
+        * bpf_skb_[gs]et_tunnel_key(skb, key, size, flags)
+        * retrieve or populate tunnel metadata
+        * @skb: pointer to skb
+        * @key: pointer to 'struct bpf_tunnel_key'
+        * @size: size of 'struct bpf_tunnel_key'
+        * @flags: room for future extensions
+        * Return: 0 on success
+        */
+       BPF_FUNC_skb_get_tunnel_key,
+       BPF_FUNC_skb_set_tunnel_key,
        __BPF_FUNC_MAX_ID,
 };
 
@@ -269,6 +290,12 @@ struct __sk_buff {
        __u32 ifindex;
        __u32 tc_index;
        __u32 cb[5];
+       __u32 hash;
+};
+
+struct bpf_tunnel_key {
+       __u32 tunnel_id;
+       __u32 remote_ipv4;
 };
 
 #endif /* _UAPI__LINUX_BPF_H__ */
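
Together with the metadata-mode VXLAN device above, bpf_skb_get_tunnel_key() lets a tc-attached eBPF program key decisions off the VNI a packet arrived on. A minimal restricted-C sketch (helper ids as defined here; section annotations and loader plumbing omitted):

    #include <linux/bpf.h>

    static int (*get_tunnel_key)(struct __sk_buff *skb,
                                 struct bpf_tunnel_key *key,
                                 int size, int flags) =
            (void *) BPF_FUNC_skb_get_tunnel_key;

    int classify(struct __sk_buff *skb)
    {
            struct bpf_tunnel_key key = {};

            if (get_tunnel_key(skb, &key, sizeof(key), 0) < 0)
                    return 0;               /* not a tunnel packet */

            return key.tunnel_id == 42;     /* match VNI 42 */
    }
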
index cd67aec187d9fab31aacba01974b8f2d145b393e..cd1629170103ef77a580a3694472e4a1452613fe 100644 (file)
@@ -1093,6 +1093,11 @@ struct ethtool_sfeatures {
  * the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values,
  * respectively.  For example, if the device supports HWTSTAMP_TX_ON,
  * then (1 << HWTSTAMP_TX_ON) in 'tx_types' will be set.
+ *
+ * Drivers should only report the filters they actually support without
+ * upscaling in the SIOCSHWTSTAMP ioctl. If a SIOCSHWTSTAMP request for
+ * HWTSTAMP_FILTER_V1_SYNC is satisfied by upscaling it to
+ * HWTSTAMP_FILTER_V1_EVENT, the driver should only report
+ * HWTSTAMP_FILTER_V1_EVENT in this op.
  */
 struct ethtool_ts_info {
        __u32   cmd;
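
In driver terms: rx_filters must list exactly the values a SIOCSHWTSTAMP handler would accept verbatim, not the broader set it silently upgrades them to. A sketch for a hypothetical driver whose hardware can only timestamp all packets or none:

    static int foo_get_ts_info(struct net_device *dev,
                               struct ethtool_ts_info *info)
    {
            info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
                                    SOF_TIMESTAMPING_RX_HARDWARE |
                                    SOF_TIMESTAMPING_RAW_HARDWARE;
            info->phc_index = -1;   /* no PTP hardware clock */
            info->tx_types  = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
            /* only what SIOCSHWTSTAMP accepts as-is: no per-protocol filters */
            info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
                               BIT(HWTSTAMP_FILTER_ALL);
            return 0;
    }
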
index 2b82d7e30974f93b9a93afc2fdf5c9137492043d..96161b8202b5d026ed39904f15a899659fc39adb 100644 (file)
@@ -43,7 +43,7 @@ enum {
        FRA_UNUSED5,
        FRA_FWMARK,     /* mark */
        FRA_FLOW,       /* flow/class id */
-       FRA_UNUSED6,
+       FRA_TUN_ID,
        FRA_SUPPRESS_IFGROUP,
        FRA_SUPPRESS_PREFIXLEN,
        FRA_TABLE,      /* Extended table id */
index eaaea6208b424e7ef4fd361646b07fc497180a12..3635b77975085a5801d9d4a5555beaf70a623441 100644 (file)
@@ -182,6 +182,7 @@ struct br_mdb_entry {
 #define MDB_TEMPORARY 0
 #define MDB_PERMANENT 1
        __u8 state;
+       __u16 vid;
        struct {
                union {
                        __be32  ip4;
index 2c7e8e3d3981e7a70154f0239cfec64946c93623..ea047480a1f0ddca2025fd12db262b7bb359f9c6 100644 (file)
@@ -148,6 +148,7 @@ enum {
        IFLA_PHYS_SWITCH_ID,
        IFLA_LINK_NETNSID,
        IFLA_PHYS_PORT_NAME,
+       IFLA_PROTO_DOWN,
        __IFLA_MAX
 };
 
@@ -381,6 +382,8 @@ enum {
        IFLA_VXLAN_REMCSUM_RX,
        IFLA_VXLAN_GBP,
        IFLA_VXLAN_REMCSUM_NOPARTIAL,
+       IFLA_VXLAN_FLOWBASED,
+       IFLA_VXLAN_COLLECT_METADATA,
        __IFLA_VXLAN_MAX
 };
 #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@@ -431,6 +434,7 @@ enum {
        IFLA_BOND_AD_ACTOR_SYS_PRIO,
        IFLA_BOND_AD_USER_PORT_KEY,
        IFLA_BOND_AD_ACTOR_SYSTEM,
+       IFLA_BOND_TLB_DYNAMIC_LB,
        __IFLA_BOND_MAX,
 };
 
index 5efa54ae567ca933a15dc1210b5af2cb569951c6..80f3b74446a1a3e56704550138e816014537e60f 100644 (file)
@@ -171,6 +171,8 @@ enum {
        DEVCONF_USE_OPTIMISTIC,
        DEVCONF_ACCEPT_RA_MTU,
        DEVCONF_STABLE_SECRET,
+       DEVCONF_USE_OIF_ADDRS_ONLY,
+       DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT,
        DEVCONF_MAX
 };
 
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
new file mode 100644 (file)
index 0000000..31377bb
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef _UAPI_LWTUNNEL_H_
+#define _UAPI_LWTUNNEL_H_
+
+#include <linux/types.h>
+
+enum lwtunnel_encap_types {
+       LWTUNNEL_ENCAP_NONE,
+       LWTUNNEL_ENCAP_MPLS,
+       LWTUNNEL_ENCAP_IP,
+       __LWTUNNEL_ENCAP_MAX,
+};
+
+#define LWTUNNEL_ENCAP_MAX (__LWTUNNEL_ENCAP_MAX - 1)
+
+
+#endif /* _UAPI_LWTUNNEL_H_ */
index 139d4dd1cab83f1cee0399e9924138a24d350ba7..24a6cb1aec86f2a73ffcb717779249970bbb31dc 100644 (file)
@@ -41,4 +41,6 @@ struct mpls_label {
 #define MPLS_LABEL_OAMALERT            14 /* RFC3429 */
 #define MPLS_LABEL_EXTENSION           15 /* RFC7274 */
 
+#define MPLS_LABEL_FIRST_UNRESERVED    16 /* RFC3032 */
+
 #endif /* _UAPI_MPLS_H */
diff --git a/include/uapi/linux/mpls_iptunnel.h b/include/uapi/linux/mpls_iptunnel.h
new file mode 100644 (file)
index 0000000..d80a049
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ *     mpls tunnel api
+ *
+ *     Authors:
+ *             Roopa Prabhu <roopa@cumulusnetworks.com>
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_LINUX_MPLS_IPTUNNEL_H
+#define _UAPI_LINUX_MPLS_IPTUNNEL_H
+
+/* MPLS tunnel attributes
+ * [RTA_ENCAP] = {
+ *     [MPLS_IPTUNNEL_DST]
+ * }
+ */
+enum {
+       MPLS_IPTUNNEL_UNSPEC,
+       MPLS_IPTUNNEL_DST,
+       __MPLS_IPTUNNEL_MAX,
+};
+#define MPLS_IPTUNNEL_MAX (__MPLS_IPTUNNEL_MAX - 1)
+
+#endif /* _UAPI_LINUX_MPLS_IPTUNNEL_H */
index ceeefe6681b5bdc8bda6893375d689ce837311df..ed4e776e1242f586cf1062646903e9afc7bb1af6 100644 (file)
@@ -13,6 +13,8 @@ enum sctp_conntrack {
        SCTP_CONNTRACK_SHUTDOWN_SENT,
        SCTP_CONNTRACK_SHUTDOWN_RECD,
        SCTP_CONNTRACK_SHUTDOWN_ACK_SENT,
+       SCTP_CONNTRACK_HEARTBEAT_SENT,
+       SCTP_CONNTRACK_HEARTBEAT_ACKED,
        SCTP_CONNTRACK_MAX
 };
 
index 1ab0b97b3a1e68336485b2679045024e86ff5444..f2c10dc140d60934cdaf938ded428a7d517e5a87 100644 (file)
@@ -92,6 +92,8 @@ enum ctattr_timeout_sctp {
        CTA_TIMEOUT_SCTP_SHUTDOWN_SENT,
        CTA_TIMEOUT_SCTP_SHUTDOWN_RECD,
        CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
+       CTA_TIMEOUT_SCTP_HEARTBEAT_SENT,
+       CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED,
        __CTA_TIMEOUT_SCTP_MAX
 };
 #define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1)
index 1dab77601c217c19bb7b0432fca4dd5371b0111d..d6b8854601872063bec49ffca7b5f0a0aa7a38be 100644 (file)
@@ -321,7 +321,7 @@ enum ovs_key_attr {
                                 * the accepted length of the array. */
 
 #ifdef __KERNEL__
-       OVS_KEY_ATTR_TUNNEL_INFO,  /* struct ovs_tunnel_info */
+       OVS_KEY_ATTR_TUNNEL_INFO,  /* struct ip_tunnel_info */
 #endif
        __OVS_KEY_ATTR_MAX
 };
index fdd8f07f1d34bd419dbd2c3f62eb09a659842cb4..47d24cb3fbc1f8017f715dff06ab5aa5f977f6b0 100644 (file)
@@ -286,6 +286,21 @@ enum rt_class_t {
 
 /* Routing message attributes */
 
+enum ip_tunnel_t {
+       IP_TUN_UNSPEC,
+       IP_TUN_ID,
+       IP_TUN_DST,
+       IP_TUN_SRC,
+       IP_TUN_TTL,
+       IP_TUN_TOS,
+       IP_TUN_SPORT,
+       IP_TUN_DPORT,
+       IP_TUN_FLAGS,
+       __IP_TUN_MAX,
+};
+
+#define IP_TUN_MAX (__IP_TUN_MAX - 1)
+
 enum rtattr_type_t {
        RTA_UNSPEC,
        RTA_DST,
@@ -308,6 +323,8 @@ enum rtattr_type_t {
        RTA_VIA,
        RTA_NEWDST,
        RTA_PREF,
+       RTA_ENCAP_TYPE,
+       RTA_ENCAP,
        __RTA_MAX
 };
 
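
RTA_ENCAP_TYPE carries one of the lwtunnel_encap_types values added above, and RTA_ENCAP nests the encap-specific attributes, e.g. MPLS_IPTUNNEL_DST from mpls_iptunnel.h above. A hedged sketch of a route dump emitting an MPLS encap, assuming MPLS's internal nla_put_labels() helper:

    struct nlattr *encap;
    u32 label = MPLS_LABEL_FIRST_UNRESERVED;

    if (nla_put_u16(skb, RTA_ENCAP_TYPE, LWTUNNEL_ENCAP_MPLS))
            goto nla_put_failure;

    encap = nla_nest_start(skb, RTA_ENCAP);
    if (!encap)
            goto nla_put_failure;
    if (nla_put_labels(skb, MPLS_IPTUNNEL_DST, 1, &label))
            goto nla_put_failure;
    nla_nest_end(skb, encap);
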
index eee8968407f063b5d9c4776a30ebe45e5b782f2d..25a9ad8bcef1240915f2553a8acade447186d869 100644 (file)
@@ -278,6 +278,8 @@ enum
        LINUX_MIB_TCPACKSKIPPEDCHALLENGE,       /* TCPACKSkippedChallenge */
        LINUX_MIB_TCPWINPROBE,                  /* TCPWinProbe */
        LINUX_MIB_TCPKEEPALIVE,                 /* TCPKeepAlive */
+       LINUX_MIB_TCPMTUPFAIL,                  /* TCPMTUPFail */
+       LINUX_MIB_TCPMTUPSUCCESS,               /* TCPMTUPSuccess */
        __LINUX_MIB_MAX
 };
 
index 7bbee79ca2933f518a16f0250e7bbfd0a4e85fe0..ec32293a00db057eb7559c2e7cadce01deb3ef22 100644 (file)
@@ -34,6 +34,7 @@
 /* The feature bitmap for virtio net */
 #define VIRTIO_NET_F_CSUM      0       /* Host handles pkts w/ partial csum */
 #define VIRTIO_NET_F_GUEST_CSUM        1       /* Guest handles pkts w/ partial csum */
+#define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2 /* Dynamic offload configuration. */
 #define VIRTIO_NET_F_MAC       5       /* Host has given MAC address. */
 #define VIRTIO_NET_F_GUEST_TSO4        7       /* Guest can handle TSOv4 in. */
 #define VIRTIO_NET_F_GUEST_TSO6        8       /* Guest can handle TSOv6 in. */
@@ -226,4 +227,19 @@ struct virtio_net_ctrl_mq {
  #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN        1
  #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX        0x8000
 
+/*
+ * Control network offloads
+ *
+ * Reconfigures the network offloads that Guest can handle.
+ *
+ * Available with the VIRTIO_NET_F_CTRL_GUEST_OFFLOADS feature bit.
+ *
+ * Command data format matches the feature bit mask exactly.
+ *
+ * See VIRTIO_NET_F_GUEST_* for the list of offloads
+ * that can be enabled/disabled.
+ */
+#define VIRTIO_NET_CTRL_GUEST_OFFLOADS   5
+#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET        0
+
 #endif /* _LINUX_VIRTIO_NET_H */
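
The command payload is simply the 64-bit offload bitmap in the device's byte order, using the same bit positions as the VIRTIO_NET_F_GUEST_* feature bits. A sketch of a guest toggling receive offloads at run time (assumes a virtio_net-style driver-internal virtnet_send_command() helper):

    static int toy_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
    {
            __virtio64 v = cpu_to_virtio64(vi->vdev, offloads);
            struct scatterlist sg;

            sg_init_one(&sg, &v, sizeof(v));
            if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                      VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg))
                    return -EINVAL;
            return 0;
    }
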
index 75301468359f0c558ff8c3c12450301c80cff19a..90007a1abcab144ac3d6ac7d6e6f4001d58abb14 100644 (file)
@@ -157,6 +157,12 @@ struct virtio_pci_common_cfg {
        __le32 queue_used_hi;           /* read-write */
 };
 
+/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */
+struct virtio_pci_cfg_cap {
+       struct virtio_pci_cap cap;
+       __u8 pci_cfg_data[4]; /* Data for BAR access. */
+};
+
 /* Macro versions of offsets for the Old Timers! */
 #define VIRTIO_PCI_CAP_VNDR            0
 #define VIRTIO_PCI_CAP_NEXT            1
index 915980ac68dfa8cc1dc973b8a7e659fc56383c22..c07295969b7e134ec85bf58b72b100949a6462fa 100644 (file)
@@ -31,6 +31,9 @@
  * SUCH DAMAGE.
  *
  * Copyright Rusty Russell IBM Corporation 2007. */
+#ifndef __KERNEL__
+#include <stdint.h>
+#endif
 #include <linux/types.h>
 #include <linux/virtio_types.h>
 
@@ -143,7 +146,7 @@ static inline void vring_init(struct vring *vr, unsigned int num, void *p,
        vr->num = num;
        vr->desc = p;
        vr->avail = p + num*sizeof(struct vring_desc);
-       vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__virtio16)
+       vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16)
                + align-1) & ~(align - 1));
 }
 
index 12215205ab8d0b11d854dcaa29f00bed48a980c7..785c5ca0994b5ab41e43fcfe6553197f6633dd06 100644 (file)
 
 /*
  * Block Header.
- * This header preceeds all object and object arrays below.
+ * This header precedes all object and object arrays below.
  */
 struct snd_soc_tplg_hdr {
        __le32 magic;           /* magic number */
@@ -222,7 +222,7 @@ struct snd_soc_tplg_stream_config {
 /*
  * Manifest. List totals for each payload type. Not used in parsing, but will
  * be passed to the component driver before any other objects in order for any
- * global componnent resource allocations.
+ * global component resource allocations.
  *
  * File block representation for manifest :-
  * +-----------------------------------+----+
index c5bedc82bc1c540bf466c4d2e64f8663a974536d..fafa741614453c46ec9eb90d340221dcf9c3cc85 100644 (file)
@@ -177,6 +177,7 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
        return 0;
 }
+EXPORT_SYMBOL_GPL(__bpf_call_base);
 
 /**
  *     __bpf_prog_run - run eBPF program on a given context
@@ -453,7 +454,11 @@ select_insn:
                if (unlikely(!prog))
                        goto out;
 
-               ARG1 = BPF_R1;
+               /* ARG1 at this point is guaranteed to point to CTX from
+                * the verifier side due to the fact that the tail call is
+                * handled like a helper, that is, bpf_tail_call_proto,
+                * where arg1_type is ARG_PTR_TO_CTX.
+                */
                insn = prog->insnsi;
                goto select_insn;
 out:
index 039d866fd36ab0e1d553166acbf2aa8b86bbab06..cd307df98cb33fca49c2e02704336f0bdd7c736d 100644 (file)
@@ -648,6 +648,9 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off,
        struct verifier_state *state = &env->cur_state;
        int size, err = 0;
 
+       if (state->regs[regno].type == PTR_TO_STACK)
+               off += state->regs[regno].imm;
+
        size = bpf_size_to_bytes(bpf_size);
        if (size < 0)
                return size;
@@ -667,7 +670,8 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off,
                if (!err && t == BPF_READ && value_regno >= 0)
                        mark_reg_unknown_value(state->regs, value_regno);
 
-       } else if (state->regs[regno].type == FRAME_PTR) {
+       } else if (state->regs[regno].type == FRAME_PTR ||
+                  state->regs[regno].type == PTR_TO_STACK) {
                if (off >= 0 || off < -MAX_BPF_STACK) {
                        verbose("invalid stack off=%d size=%d\n", off, size);
                        return -EACCES;
index 6a374544d495f1ff7b7f5bfd03fd798e30144a7e..5644ec5582b93a600accc90c9211c618cd3735fb 100644 (file)
@@ -527,18 +527,9 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen)
                goto out_notify;
        }
 
-       /*
-        * Some architectures have to walk the irq descriptors to
-        * setup the vector space for the cpu which comes online.
-        * Prevent irq alloc/free across the bringup.
-        */
-       irq_lock_sparse();
-
        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu, idle);
 
-       irq_unlock_sparse();
-
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));
index 1bfefc6f96a4ea92507741cf1e935c6dab04c2b1..dbd9b8d7b7cc2baa8c425bf35191ce8dfd117ecf 100644 (file)
@@ -287,6 +287,11 @@ static void set_max_threads(unsigned int max_threads_suggested)
        max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
 }
 
+#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
+/* Initialized by the architecture: */
+int arch_task_struct_size __read_mostly;
+#endif
+
 void __init fork_init(void)
 {
 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
@@ -295,7 +300,7 @@ void __init fork_init(void)
 #endif
        /* create a slab on which task_structs can be allocated */
        task_struct_cachep =
-               kmem_cache_create("task_struct", sizeof(struct task_struct),
+               kmem_cache_create("task_struct", arch_task_struct_size,
                        ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
 #endif
 
index 9065107f083e90e3fbd3c38f2fbc164d11ff2b3c..7a5237a1bce5b5626d74cdb377f8e5a900d06f7a 100644 (file)
@@ -75,13 +75,21 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
                    !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
 #ifdef CONFIG_HARDIRQS_SW_RESEND
                        /*
-                        * If the interrupt has a parent irq and runs
-                        * in the thread context of the parent irq,
-                        * retrigger the parent.
+                        * If the interrupt is running in the thread
+                        * context of the parent irq we need to be
+                        * careful, because we cannot trigger it
+                        * directly.
                         */
-                       if (desc->parent_irq &&
-                           irq_settings_is_nested_thread(desc))
+                       if (irq_settings_is_nested_thread(desc)) {
+                               /*
+                                * If the parent_irq is valid, we
+                                * retrigger the parent, otherwise we
+                                * do nothing.
+                                */
+                               if (!desc->parent_irq)
+                                       return;
                                irq = desc->parent_irq;
+                       }
                        /* Set it pending and activate the softirq: */
                        set_bit(irq, irqs_resend);
                        tasklet_schedule(&resend_tasklet);
index 90552aab5f2dd076c147c73cc6f9ff59aabb7af6..fed052a1bc9f5792c7cb856336f2093e75aa2432 100644 (file)
@@ -504,13 +504,13 @@ int region_is_ram(resource_size_t start, unsigned long size)
 {
        struct resource *p;
        resource_size_t end = start + size - 1;
-       int flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+       unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        const char *name = "System RAM";
        int ret = -1;
 
        read_lock(&resource_lock);
        for (p = iomem_resource.child; p ; p = p->sibling) {
-               if (end < p->start)
+               if (p->end < start)
                        continue;
 
                if (p->start <= start && end <= p->end) {
@@ -521,7 +521,7 @@ int region_is_ram(resource_size_t start, unsigned long size)
                                ret = 1;
                        break;
                }
-               if (p->end < start)
+               if (end < p->start)
                        break;  /* not found */
        }
        read_unlock(&resource_lock);
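
The two range tests were swapped: the resource list is sorted by address, so p->end < start means "not reached yet, keep walking" and end < p->start means "walked past it, stop". (flags also becomes unsigned long to match struct resource and keep IORESOURCE_BUSY, bit 31, out of sign-extension trouble.) The corrected loop shape, distilled (sketch):

    /* scan a sorted, non-overlapping resource list for [start, end] */
    for (p = iomem_resource.child; p; p = p->sibling) {
            if (p->end < start)     /* entirely below: keep walking */
                    continue;
            if (end < p->start)     /* entirely above: sorted, so stop */
                    break;
            /* overlap: compare flags and name here */
    }
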
index 65c8f3ebdc3c5d58d148780c84c90fe54dc6e4dc..d113c3ba8bc44b4f50a12e9c055cb70b63a632fc 100644 (file)
@@ -3683,7 +3683,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
        cfs_rq->throttled = 1;
        cfs_rq->throttled_clock = rq_clock(rq);
        raw_spin_lock(&cfs_b->lock);
-       empty = list_empty(&cfs_rq->throttled_list);
+       empty = list_empty(&cfs_b->throttled_cfs_rq);
 
        /*
         * Add to the _head_ of the list, so that an already-started
index 52b9e199b5acc1e292d7e5a030685f6dbe7fdfe2..f6aae7977824ab7595950e378c2dd0f0bbe0256d 100644 (file)
@@ -839,7 +839,6 @@ out:
        raw_spin_unlock(&tick_broadcast_lock);
        return ret;
 }
-EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);
 
 /*
  * Reset the one shot broadcast for a cpu
index 55e13efff1ab0d7431b81ea951a66feec714728f..f8bf47571dda4748f0d3be7574591566ccac8936 100644 (file)
@@ -363,6 +363,7 @@ int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
 
        return __tick_broadcast_oneshot_control(state);
 }
+EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);
 
 #ifdef CONFIG_HOTPLUG_CPU
 /*
index 02bece4a99ea36bb835fc45a9aa55c1aedd69f9f..eb11011b5292add880af7038800560aa29c5a674 100644 (file)
@@ -98,6 +98,13 @@ struct ftrace_pid {
        struct pid *pid;
 };
 
+static bool ftrace_pids_enabled(void)
+{
+       return !list_empty(&ftrace_pids);
+}
+
+static void ftrace_update_trampoline(struct ftrace_ops *ops);
+
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock);
 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 static struct ftrace_ops control_ops;
 
@@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
        if (!test_tsk_trace_trace(current))
                return;
 
-       ftrace_pid_function(ip, parent_ip, op, regs);
-}
-
-static void set_ftrace_pid_function(ftrace_func_t func)
-{
-       /* do not set ftrace_pid_function to itself! */
-       if (func != ftrace_pid_func)
-               ftrace_pid_function = func;
+       op->saved_func(ip, parent_ip, op, regs);
 }
 
 /**
@@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func)
 void clear_ftrace_function(void)
 {
        ftrace_trace_function = ftrace_stub;
-       ftrace_pid_function = ftrace_stub;
 }
 
 static void control_ops_disable_all(struct ftrace_ops *ops)
@@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
        } else
                add_ftrace_ops(&ftrace_ops_list, ops);
 
+       /* Always save the function, and reset at unregistering */
+       ops->saved_func = ops->func;
+
+       if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
+               ops->func = ftrace_pid_func;
+
        ftrace_update_trampoline(ops);
 
        if (ftrace_enabled)
@@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
        if (ftrace_enabled)
                update_ftrace_function();
 
+       ops->func = ops->saved_func;
+
        return 0;
 }
 
 static void ftrace_update_pid_func(void)
 {
+       bool enabled = ftrace_pids_enabled();
+       struct ftrace_ops *op;
+
        /* Only do something if we are tracing something */
        if (ftrace_trace_function == ftrace_stub)
                return;
 
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               if (op->flags & FTRACE_OPS_FL_PID) {
+                       op->func = enabled ? ftrace_pid_func :
+                               op->saved_func;
+                       ftrace_update_trampoline(op);
+               }
+       } while_for_each_ftrace_op(op);
+
        update_ftrace_function();
 }
 
@@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = {
        .local_hash.filter_hash         = EMPTY_HASH,
        INIT_OPS_HASH(global_ops)
        .flags                          = FTRACE_OPS_FL_RECURSION_SAFE |
-                                         FTRACE_OPS_FL_INITIALIZED,
+                                         FTRACE_OPS_FL_INITIALIZED |
+                                         FTRACE_OPS_FL_PID,
 };
 
 /*
@@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops)
 
 static struct ftrace_ops global_ops = {
        .func                   = ftrace_stub,
-       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
+                                 FTRACE_OPS_FL_INITIALIZED |
+                                 FTRACE_OPS_FL_PID,
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
                if (WARN_ON(tr->ops->func != ftrace_stub))
                        printk("ftrace ops had %pS for function\n",
                               tr->ops->func);
-               /* Only the top level instance does pid tracing */
-               if (!list_empty(&ftrace_pids)) {
-                       set_ftrace_pid_function(func);
-                       func = ftrace_pid_func;
-               }
        }
        tr->ops->func = func;
        tr->ops->private = tr;
@@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos)
 {
        mutex_lock(&ftrace_lock);
 
-       if (list_empty(&ftrace_pids) && (!*pos))
+       if (!ftrace_pids_enabled() && (!*pos))
                return (void *) 1;
 
        return seq_list_start(&ftrace_pids, *pos);
@@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = {
        .func                   = ftrace_stub,
        .flags                  = FTRACE_OPS_FL_RECURSION_SAFE |
                                   FTRACE_OPS_FL_INITIALIZED |
+                                  FTRACE_OPS_FL_PID |
                                   FTRACE_OPS_FL_STUB,
 #ifdef FTRACE_GRAPH_TRAMP_ADDR
        .trampoline             = FTRACE_GRAPH_TRAMP_ADDR,
index f060716b02ae25b45494b0f350df6fe2fc0f369b..74bde81601a902759e5714b04a5a987ddbde2d15 100644 (file)
@@ -444,6 +444,7 @@ enum {
 
        TRACE_CONTROL_BIT,
 
+       TRACE_BRANCH_BIT,
 /*
  * Abuse of the trace_recursion.
  * As we need a way to maintain state if we are tracing the function
index a87b43f49eb448afa3f1cc2bc44763d71f06dd52..e2e12ad3186f440f7841819a9f260509d5afde43 100644 (file)
@@ -36,9 +36,12 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
        struct trace_branch *entry;
        struct ring_buffer *buffer;
        unsigned long flags;
-       int cpu, pc;
+       int pc;
        const char *p;
 
+       if (current->trace_recursion & TRACE_BRANCH_BIT)
+               return;
+
        /*
         * I would love to save just the ftrace_likely_data pointer, but
         * this code can also be used by modules. Ugly things can happen
@@ -49,10 +52,10 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
        if (unlikely(!tr))
                return;
 
-       local_irq_save(flags);
-       cpu = raw_smp_processor_id();
-       data = per_cpu_ptr(tr->trace_buffer.data, cpu);
-       if (atomic_inc_return(&data->disabled) != 1)
+       raw_local_irq_save(flags);
+       current->trace_recursion |= TRACE_BRANCH_BIT;
+       data = this_cpu_ptr(tr->trace_buffer.data);
+       if (atomic_read(&data->disabled))
                goto out;
 
        pc = preempt_count();
@@ -81,8 +84,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
                __buffer_unlock_commit(buffer, event);
 
  out:
-       atomic_dec(&data->disabled);
-       local_irq_restore(flags);
+       current->trace_recursion &= ~TRACE_BRANCH_BIT;
+       raw_local_irq_restore(flags);
 }
 
 static inline
index 528ff932d8e445ae9e19940d4f13cd20c0273bc7..62696dff5730be1927edfdafc7bc314e71226a88 100644 (file)
@@ -59,8 +59,11 @@ decompress_fn __init decompress_method(const unsigned char *inbuf, long len,
 {
        const struct compress_format *cf;
 
-       if (len < 2)
+       if (len < 2) {
+               if (name)
+                       *name = NULL;
                return NULL;    /* Need at least this much... */
+       }
 
        pr_debug("Compressed data magic: %#.2x %#.2x\n", inbuf[0], inbuf[1]);
 
index ae4b65e17e6486c7431fdc39335eab4cb15a0539..dace71fe41f707457468ec1ca3f8d94f756313a8 100644 (file)
@@ -574,6 +574,9 @@ void debug_dma_assert_idle(struct page *page)
        unsigned long flags;
        phys_addr_t cln;
 
+       if (dma_debug_disabled())
+               return;
+
        if (!page)
                return;
 
index 7ea09699855d98db901737bb6cfad30f4b32149f..8d74c20d8595c76d3882fcc02a7fa00f8db1bfaa 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/ctype.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
+#include <asm/unaligned.h>
 
 const char hex_asc[] = "0123456789abcdef";
 EXPORT_SYMBOL(hex_asc);
@@ -139,7 +140,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
                for (j = 0; j < ngroups; j++) {
                        ret = snprintf(linebuf + lx, linebuflen - lx,
                                       "%s%16.16llx", j ? " " : "",
-                                      (unsigned long long)*(ptr8 + j));
+                                      get_unaligned(ptr8 + j));
                        if (ret >= linebuflen - lx)
                                goto overflow1;
                        lx += ret;
@@ -150,7 +151,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
                for (j = 0; j < ngroups; j++) {
                        ret = snprintf(linebuf + lx, linebuflen - lx,
                                       "%s%8.8x", j ? " " : "",
-                                      *(ptr4 + j));
+                                      get_unaligned(ptr4 + j));
                        if (ret >= linebuflen - lx)
                                goto overflow1;
                        lx += ret;
@@ -161,7 +162,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
                for (j = 0; j < ngroups; j++) {
                        ret = snprintf(linebuf + lx, linebuflen - lx,
                                       "%s%4.4x", j ? " " : "",
-                                      *(ptr2 + j));
+                                      get_unaligned(ptr2 + j));
                        if (ret >= linebuflen - lx)
                                goto overflow1;
                        lx += ret;
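
The old code cast an arbitrary caller buffer to u16/u32/u64 pointers and dereferenced it directly, which is undefined on strict-alignment architectures; get_unaligned() generates a safe access instead. The same idiom applies anywhere a byte buffer is parsed in larger units (sketch):

    #include <asm/unaligned.h>

    /* read a 32-bit field at an arbitrary byte offset into 'buf' */
    u32 val = get_unaligned((const u32 *)(buf + offset));
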
index 2e3bd01964a9047f481862c2323be8c4ea42db9c..3e3a5c3cb330dfd39a5a24a0dc638c5f4ecdbfe0 100644 (file)
@@ -337,8 +337,9 @@ error:
 }
 EXPORT_SYMBOL(kobject_init);
 
-static int kobject_add_varg(struct kobject *kobj, struct kobject *parent,
-                           const char *fmt, va_list vargs)
+static __printf(3, 0) int kobject_add_varg(struct kobject *kobj,
+                                          struct kobject *parent,
+                                          const char *fmt, va_list vargs)
 {
        int retval;
 
index 7f58c735d745049025407806e972bb4c7f124888..3afddf2026c983d279fe822a0c42b7f1a65f193a 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/filter.h>
+#include <linux/bpf.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
@@ -355,6 +356,81 @@ static int bpf_fill_ja(struct bpf_test *self)
        return __bpf_fill_ja(self, 12, 9);
 }
 
+static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
+{
+       unsigned int len = BPF_MAXINSNS;
+       struct sock_filter *insn;
+       int i;
+
+       insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+       if (!insn)
+               return -ENOMEM;
+
+       for (i = 0; i < len - 1; i += 2) {
+               insn[i] = __BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0);
+               insn[i + 1] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                        SKF_AD_OFF + SKF_AD_CPU);
+       }
+
+       insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xbee);
+
+       self->u.ptr.insns = insn;
+       self->u.ptr.len = len;
+
+       return 0;
+}
+
+#define PUSH_CNT 68
+/* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
+static int bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
+{
+       unsigned int len = BPF_MAXINSNS;
+       struct bpf_insn *insn;
+       int i = 0, j, k = 0;
+
+       insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+       if (!insn)
+               return -ENOMEM;
+
+       insn[i++] = BPF_MOV64_REG(R6, R1);
+loop:
+       for (j = 0; j < PUSH_CNT; j++) {
+               insn[i++] = BPF_LD_ABS(BPF_B, 0);
+               insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
+               i++;
+               insn[i++] = BPF_MOV64_REG(R1, R6);
+               insn[i++] = BPF_MOV64_IMM(R2, 1);
+               insn[i++] = BPF_MOV64_IMM(R3, 2);
+               insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                        bpf_skb_vlan_push_proto.func - __bpf_call_base);
+               insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
+               i++;
+       }
+
+       for (j = 0; j < PUSH_CNT; j++) {
+               insn[i++] = BPF_LD_ABS(BPF_B, 0);
+               insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
+               i++;
+               insn[i++] = BPF_MOV64_REG(R1, R6);
+               insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                        bpf_skb_vlan_pop_proto.func - __bpf_call_base);
+               insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
+               i++;
+       }
+       if (++k < 5)
+               goto loop;
+
+       for (; i < len - 1; i++)
+               insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xbef);
+
+       insn[len - 1] = BPF_EXIT_INSN();
+
+       self->u.ptr.insns = insn;
+       self->u.ptr.len = len;
+
+       return 0;
+}
+
 static struct bpf_test tests[] = {
        {
                "TAX",
@@ -3674,6 +3750,9 @@ static struct bpf_test tests[] = {
                .u.insns_int = {
                        BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
                        BPF_ENDIAN(BPF_FROM_BE, R0, 32),
+                       BPF_ALU64_REG(BPF_MOV, R1, R0),
+                       BPF_ALU64_IMM(BPF_RSH, R1, 32),
+                       BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
                        BPF_EXIT_INSN(),
                },
                INTERNAL,
@@ -3708,6 +3787,9 @@ static struct bpf_test tests[] = {
                .u.insns_int = {
                        BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
                        BPF_ENDIAN(BPF_FROM_LE, R0, 32),
+                       BPF_ALU64_REG(BPF_MOV, R1, R0),
+                       BPF_ALU64_IMM(BPF_RSH, R1, 32),
+                       BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
                        BPF_EXIT_INSN(),
                },
                INTERNAL,
@@ -4392,6 +4474,22 @@ static struct bpf_test tests[] = {
                { { 0, 0xababcbac } },
                .fill_helper = bpf_fill_maxinsns11,
        },
+       {
+               "BPF_MAXINSNS: ld_abs+get_processor_id",
+               { },
+               CLASSIC,
+               { },
+               { { 1, 0xbee } },
+               .fill_helper = bpf_fill_ld_abs_get_processor_id,
+       },
+       {
+               "BPF_MAXINSNS: ld_abs+vlan_push/pop",
+               { },
+               INTERNAL,
+               { 0x34 },
+               { { 1, 0xbef } },
+               .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
+       },
 };
 
 static struct net_device dev;
@@ -4515,6 +4613,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
                }
 
                fp->len = flen;
+               /* Type doesn't really matter here as long as it's not unspec. */
+               fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
                memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
 
                bpf_prog_select_runtime(fp);
@@ -4545,14 +4645,14 @@ static int __run_one(const struct bpf_prog *fp, const void *data,
        u64 start, finish;
        int ret = 0, i;
 
-       start = ktime_to_us(ktime_get());
+       start = ktime_get_ns();
 
        for (i = 0; i < runs; i++)
                ret = BPF_PROG_RUN(fp, data);
 
-       finish = ktime_to_us(ktime_get());
+       finish = ktime_get_ns();
 
-       *duration = (finish - start) * 1000ULL;
+       *duration = finish - start;
        do_div(*duration, runs);
 
        return ret;
index c90777eae1f837f84b1b53fd8704b567b9d90835..9af7cefb195d3d3537366396d0af31180292ece9 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/rcupdate.h>
 #include <linux/rhashtable.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 
 #define MAX_ENTRIES    1000000
 #define TEST_INSERT_FAIL INT_MAX
@@ -87,6 +88,8 @@ static int __init test_rht_lookup(struct rhashtable *ht)
                                return -EINVAL;
                        }
                }
+
+               cond_resched_rcu();
        }
 
        return 0;
@@ -160,6 +163,8 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
                } else if (err) {
                        return err;
                }
+
+               cond_resched();
        }
 
        if (insert_fails)
@@ -183,6 +188,8 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
 
                        rhashtable_remove_fast(ht, &obj->node, test_rht_params);
                }
+
+               cond_resched();
        }
 
        end = ktime_get_ns();
index 7621ee34daa0dea1dd0246238e7eb5bc613cfe4c..f8e4b60db167215862824637d856ffb34332f071 100644 (file)
@@ -39,7 +39,7 @@ static int cma_used_get(void *data, u64 *val)
 
        mutex_lock(&cma->lock);
        /* pages counter is smaller than sizeof(int) */
-       used = bitmap_weight(cma->bitmap, (int)cma->count);
+       used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
        mutex_unlock(&cma->lock);
        *val = (u64)used << cma->order_per_bit;
 
@@ -52,13 +52,14 @@ static int cma_maxchunk_get(void *data, u64 *val)
        struct cma *cma = data;
        unsigned long maxchunk = 0;
        unsigned long start, end = 0;
+       unsigned long bitmap_maxno = cma_bitmap_maxno(cma);
 
        mutex_lock(&cma->lock);
        for (;;) {
-               start = find_next_zero_bit(cma->bitmap, cma->count, end);
+               start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
                if (start >= cma->count)
                        break;
-               end = find_next_bit(cma->bitmap, cma->count, start);
+               end = find_next_bit(cma->bitmap, bitmap_maxno, start);
                maxchunk = max(end - start, maxchunk);
        }
        mutex_unlock(&cma->lock);
@@ -170,10 +171,10 @@ static void cma_debugfs_add_one(struct cma *cma, int idx)
 
        tmp = debugfs_create_dir(name, cma_debugfs_root);
 
-       debugfs_create_file("alloc", S_IWUSR, cma_debugfs_root, cma,
+       debugfs_create_file("alloc", S_IWUSR, tmp, cma,
                                &cma_alloc_fops);
 
-       debugfs_create_file("free", S_IWUSR, cma_debugfs_root, cma,
+       debugfs_create_file("free", S_IWUSR, tmp, cma,
                                &cma_free_fops);
 
        debugfs_create_file("base_pfn", S_IRUGO, tmp,
index 506eac8b38afb2cdbeb481dcebad098aac984618..ef19f22b2b7de1728fb4ed8d6451d29bb993a928 100644 (file)
@@ -246,9 +246,7 @@ static inline void reset_deferred_meminit(pg_data_t *pgdat)
 /* Returns true if the struct page for the pfn is uninitialised */
 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
 {
-       int nid = early_pfn_to_nid(pfn);
-
-       if (pfn >= NODE_DATA(nid)->first_deferred_pfn)
+       if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
                return true;
 
        return false;
@@ -1950,6 +1948,7 @@ void free_hot_cold_page_list(struct list_head *list, bool cold)
 void split_page(struct page *page, unsigned int order)
 {
        int i;
+       gfp_t gfp_mask;
 
        VM_BUG_ON_PAGE(PageCompound(page), page);
        VM_BUG_ON_PAGE(!page_count(page), page);
@@ -1963,10 +1962,11 @@ void split_page(struct page *page, unsigned int order)
                split_page(virt_to_page(page[0].shadow), order);
 #endif
 
-       set_page_owner(page, 0, 0);
+       gfp_mask = get_page_owner_gfp(page);
+       set_page_owner(page, 0, gfp_mask);
        for (i = 1; i < (1 << order); i++) {
                set_page_refcounted(page + i);
-               set_page_owner(page + i, 0, 0);
+               set_page_owner(page + i, 0, gfp_mask);
        }
 }
 EXPORT_SYMBOL_GPL(split_page);
@@ -1996,6 +1996,8 @@ int __isolate_free_page(struct page *page, unsigned int order)
        zone->free_area[order].nr_free--;
        rmv_page_order(page);
 
+       set_page_owner(page, order, __GFP_MOVABLE);
+
        /* Set the pageblock if the isolated page is at least a pageblock */
        if (order >= pageblock_order - 1) {
                struct page *endpage = page + (1 << order) - 1;
@@ -2007,7 +2009,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
                }
        }
 
-       set_page_owner(page, order, 0);
+
        return 1UL << order;
 }
 
index bd5f842b56d26aca3cf24225884d5a2eacd1580c..983c3a10fa07058df249c64d5c89b9578661f672 100644 (file)
@@ -76,6 +76,13 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
        __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
 }
 
+gfp_t __get_page_owner_gfp(struct page *page)
+{
+       struct page_ext *page_ext = lookup_page_ext(page);
+
+       return page_ext->gfp_mask;
+}
+
 static ssize_t
 print_page_owner(char __user *buf, size_t count, unsigned long pfn,
                struct page *page, struct page_ext *page_ext)
index 94a375c04f21cd5a9b525f7dffb4a14657deea89..9055d7b9d1129d69e34f8ed255922b2e340fcbb1 100644 (file)
@@ -613,6 +613,8 @@ EXPORT_SYMBOL_GPL(lowpan_header_compress);
 
 static int __init lowpan_module_init(void)
 {
+       request_module_nowait("ipv6");
+
        request_module_nowait("nhc_dest");
        request_module_nowait("nhc_fragment");
        request_module_nowait("nhc_hop");
index 9dd49ca67dbc22a905999de21b8355475ba40052..6e70ddb158b4bc121a0f32e7a53fecf8125e8354 100644 (file)
@@ -704,6 +704,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
 
        mutex_unlock(&virtio_9p_lock);
 
+       vdev->config->reset(vdev);
        vdev->config->del_vqs(vdev);
 
        sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
index 57a7c5af3175d1826f0708c551227e5e4281577e..7021c1bf44d6ce949091cb287232e6cfefed6256 100644 (file)
@@ -374,6 +374,13 @@ source "net/caif/Kconfig"
 source "net/ceph/Kconfig"
 source "net/nfc/Kconfig"
 
+config LWTUNNEL
+       bool "Network light weight tunnels"
+       ---help---
+         This feature provides an infrastructure to support lightweight
+         tunnels such as MPLS. There is no netdevice associated with a
+         lightweight tunnel endpoint; tunnel encapsulation parameters are
+         stored in lightweight tunnel state attached to fib routes.
 
 endif   # if NET
 
index cc78538d163bbf05bd6bcf5aa5c8c4954fe8ac66..aa0047c5c4672947b2b7d153dbd8a394e70ccbdf 100644 (file)
@@ -802,13 +802,10 @@ static int br2684_seq_show(struct seq_file *seq, void *v)
                           (brdev->payload == p_bridged) ? "bridged" : "routed",
                           brvcc->copies_failed, brvcc->copies_needed);
 #ifdef CONFIG_ATM_BR2684_IPFILTER
-#define b1(var, byte)  ((u8 *) &brvcc->filter.var)[byte]
-#define bs(var)                b1(var, 0), b1(var, 1), b1(var, 2), b1(var, 3)
                if (brvcc->filter.netmask != 0)
-                       seq_printf(seq, "    filter=%d.%d.%d.%d/"
-                                  "%d.%d.%d.%d\n", bs(prefix), bs(netmask));
-#undef bs
-#undef b1
+                       seq_printf(seq, "    filter=%pI4/%pI4\n",
+                                  &brvcc->filter.prefix,
+                                  &brvcc->filter.netmask);
 #endif /* CONFIG_ATM_BR2684_IPFILTER */
        }
        return 0;
index 1997538a5d23d93ddca9724fd3787dc0b0b2595a..3b78e8473a01b4a82e376266b04078e714ce1e26 100644 (file)
@@ -264,6 +264,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
 {
        ax25_clear_queues(ax25);
 
+       ax25_stop_heartbeat(ax25);
        ax25_stop_t1timer(ax25);
        ax25_stop_t2timer(ax25);
        ax25_stop_t3timer(ax25);
index 2fb7b306490424c62bafd0fe7de6fd83176ed698..0ffe2e24020aa86b80115221811f324511cc1385 100644 (file)
@@ -859,9 +859,22 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
        SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
        SET_NETDEV_DEVTYPE(netdev, &bt_type);
 
+       *dev = netdev_priv(netdev);
+       (*dev)->netdev = netdev;
+       (*dev)->hdev = chan->conn->hcon->hdev;
+       INIT_LIST_HEAD(&(*dev)->peers);
+
+       spin_lock(&devices_lock);
+       INIT_LIST_HEAD(&(*dev)->list);
+       list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
+       spin_unlock(&devices_lock);
+
        err = register_netdev(netdev);
        if (err < 0) {
                BT_INFO("register_netdev failed %d", err);
+               spin_lock(&devices_lock);
+               list_del_rcu(&(*dev)->list);
+               spin_unlock(&devices_lock);
                free_netdev(netdev);
                goto out;
        }
@@ -871,16 +884,6 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
               &chan->src, chan->src_type);
        set_bit(__LINK_STATE_PRESENT, &netdev->state);
 
-       *dev = netdev_priv(netdev);
-       (*dev)->netdev = netdev;
-       (*dev)->hdev = chan->conn->hcon->hdev;
-       INIT_LIST_HEAD(&(*dev)->peers);
-
-       spin_lock(&devices_lock);
-       INIT_LIST_HEAD(&(*dev)->list);
-       list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
-       spin_unlock(&devices_lock);
-
        return 0;
 
 out:
index b8c794b87523857b9a658526ebb92dd21b22dd57..95d1a66ba03aa20095932a1c45f9f76af2cc1393 100644 (file)
@@ -53,6 +53,11 @@ source "net/bluetooth/cmtp/Kconfig"
 
 source "net/bluetooth/hidp/Kconfig"
 
+config BT_HS
+       bool "Bluetooth High Speed (HS) features"
+       depends on BT_BREDR
+       default y
+
 config BT_LE
        bool "Bluetooth Low Energy (LE) features"
        depends on BT
index 29c12ae72a665bf8c730ee5472036fd673dc8f5a..2b15ae8c1def06642682c488a9439f0e57feeb13 100644 (file)
@@ -13,9 +13,10 @@ bluetooth_6lowpan-y := 6lowpan.o
 
 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
        hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \
-       a2mp.o amp.o ecc.o hci_request.o mgmt_util.o
+       ecc.o hci_request.o mgmt_util.o
 
 bluetooth-$(CONFIG_BT_BREDR) += sco.o
+bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o
 bluetooth-$(CONFIG_BT_DEBUGFS) += hci_debugfs.o
 bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o
 
index 5a04eb1a7e5762c82109255c2aa035bec9a840dc..5f123c3320a7be1f355d31f1023d3e3996853339 100644 (file)
@@ -16,6 +16,7 @@
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
 
+#include "hci_request.h"
 #include "a2mp.h"
 #include "amp.h"
 
@@ -286,11 +287,21 @@ static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
        return 0;
 }
 
+static void read_local_amp_info_complete(struct hci_dev *hdev, u8 status,
+                                        u16 opcode)
+{
+       BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+       a2mp_send_getinfo_rsp(hdev);
+}
+
 static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
                            struct a2mp_cmd *hdr)
 {
        struct a2mp_info_req *req  = (void *) skb->data;
        struct hci_dev *hdev;
+       struct hci_request hreq;
+       int err = 0;
 
        if (le16_to_cpu(hdr->len) < sizeof(*req))
                return -EINVAL;
@@ -311,7 +322,11 @@ static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
        }
 
        set_bit(READ_LOC_AMP_INFO, &mgr->state);
-       hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+       hci_req_init(&hreq, hdev);
+       hci_req_add(&hreq, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+       err = hci_req_run(&hreq, read_local_amp_info_complete);
+       if (err < 0)
+               a2mp_send_getinfo_rsp(hdev);
 
 done:
        if (hdev)
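
The conversion above swaps fire-and-forget hci_send_cmd() for the hci_request init/add/run sequence, so a completion callback can deliver the A2MP response; when the request cannot even be submitted, the peer is answered synchronously instead. A rough user-space sketch of that shape, with hypothetical names:

        #include <stdio.h>

        struct request {
                int queued;
                void (*complete)(int status);
        };

        static void request_add(struct request *req, void (*complete)(int))
        {
                req->queued = 1;
                req->complete = complete;
        }

        static int request_run(struct request *req)
        {
                if (!req->queued)
                        return -1;              /* submission failed */
                req->complete(0);               /* normally fires asynchronously */
                return 0;
        }

        static void getinfo_complete(int status)
        {
                printf("response sent, status %d\n", status);
        }

        int main(void)
        {
                struct request req = { 0 };

                request_add(&req, getinfo_complete);
                if (request_run(&req) < 0)
                        getinfo_complete(-1);   /* error fallback, as above */
                return 0;
        }
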
index 296f665adb09d01c0ffc7fe421bf8a75115bf1ff..a4ff3ea9b38a6e3cfe1a4ea63105458cd07e06a9 100644 (file)
@@ -130,10 +130,29 @@ struct a2mp_physlink_rsp {
 #define A2MP_STATUS_SECURITY_VIOLATION         0x06
 
 struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr);
+
+#if IS_ENABLED(CONFIG_BT_HS)
 int amp_mgr_put(struct amp_mgr *mgr);
 struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
                                       struct sk_buff *skb);
 void a2mp_discover_amp(struct l2cap_chan *chan);
+#else
+static inline int amp_mgr_put(struct amp_mgr *mgr)
+{
+       return 0;
+}
+
+static inline struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
+                                                    struct sk_buff *skb)
+{
+       return NULL;
+}
+
+static inline void a2mp_discover_amp(struct l2cap_chan *chan)
+{
+}
+#endif
+
 void a2mp_send_getinfo_rsp(struct hci_dev *hdev);
 void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status);
 void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status);
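
The stubs above are the usual compile-out pattern: with CONFIG_BT_HS disabled, callers still compile and link because the header supplies empty static inline replacements, so no #ifdef is needed at the call sites. A self-contained sketch, with a plain #if standing in for the kernel's IS_ENABLED() macro:

        #include <stdio.h>

        #define CONFIG_FEATURE 1                /* set to 0 to build the stubs */

        #if CONFIG_FEATURE
        static inline void feature_start(void)
        {
                printf("feature enabled\n");
        }
        #else
        static inline void feature_start(void)
        {
        }
        #endif

        int main(void)
        {
                feature_start();                /* call site needs no #ifdef */
                return 0;
        }
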
index ee016f03910005de87cc45a7d04b2a3989d90c2e..238ddd3cf95fb660d41f751821a09550f977f067 100644 (file)
@@ -16,6 +16,7 @@
 #include <net/bluetooth/hci_core.h>
 #include <crypto/hash.h>
 
+#include "hci_request.h"
 #include "a2mp.h"
 #include "amp.h"
 
@@ -220,10 +221,49 @@ int phylink_gen_key(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
        return hmac_sha256(gamp_key, HCI_AMP_LINK_KEY_SIZE, "802b", 4, data);
 }
 
+static void read_local_amp_assoc_complete(struct hci_dev *hdev, u8 status,
+                                         u16 opcode, struct sk_buff *skb)
+{
+       struct hci_rp_read_local_amp_assoc *rp = (void *)skb->data;
+       struct amp_assoc *assoc = &hdev->loc_assoc;
+       size_t rem_len, frag_len;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+       if (rp->status)
+               goto send_rsp;
+
+       frag_len = skb->len - sizeof(*rp);
+       rem_len = __le16_to_cpu(rp->rem_len);
+
+       if (rem_len > frag_len) {
+               BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
+
+               memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
+               assoc->offset += frag_len;
+
+               /* Read other fragments */
+               amp_read_loc_assoc_frag(hdev, rp->phy_handle);
+
+               return;
+       }
+
+       memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
+       assoc->len = assoc->offset + rem_len;
+       assoc->offset = 0;
+
+send_rsp:
+       /* Send A2MP Rsp when all fragments are received */
+       a2mp_send_getampassoc_rsp(hdev, rp->status);
+       a2mp_send_create_phy_link_req(hdev, rp->status);
+}
+
 void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle)
 {
        struct hci_cp_read_local_amp_assoc cp;
        struct amp_assoc *loc_assoc = &hdev->loc_assoc;
+       struct hci_request req;
+       int err = 0;
 
        BT_DBG("%s handle %d", hdev->name, phy_handle);
 
@@ -231,12 +271,18 @@ void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle)
        cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
        cp.len_so_far = cpu_to_le16(loc_assoc->offset);
 
-       hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+       hci_req_init(&req, hdev);
+       hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+       err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
+       if (err < 0)
+               a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
 }
 
 void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
 {
        struct hci_cp_read_local_amp_assoc cp;
+       struct hci_request req;
+       int err = 0;
 
        memset(&hdev->loc_assoc, 0, sizeof(struct amp_assoc));
        memset(&cp, 0, sizeof(cp));
@@ -244,7 +290,11 @@ void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
        cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
 
        set_bit(READ_LOC_AMP_ASSOC, &mgr->state);
-       hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+       hci_req_init(&req, hdev);
+       hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+       err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
+       if (err < 0)
+               a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
 }
 
 void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
@@ -252,6 +302,8 @@ void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
 {
        struct hci_cp_read_local_amp_assoc cp;
        struct amp_mgr *mgr = hcon->amp_mgr;
+       struct hci_request req;
+       int err = 0;
 
        cp.phy_handle = hcon->handle;
        cp.len_so_far = cpu_to_le16(0);
@@ -260,7 +312,25 @@ void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
        set_bit(READ_LOC_AMP_ASSOC_FINAL, &mgr->state);
 
        /* Read Local AMP Assoc final link information data */
-       hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+       hci_req_init(&req, hdev);
+       hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+       err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
+       if (err < 0)
+               a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
+}
+
+static void write_remote_amp_assoc_complete(struct hci_dev *hdev, u8 status,
+                                           u16 opcode, struct sk_buff *skb)
+{
+       struct hci_rp_write_remote_amp_assoc *rp = (void *)skb->data;
+
+       BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
+              hdev->name, rp->status, rp->phy_handle);
+
+       if (rp->status)
+               return;
+
+       amp_write_rem_assoc_continue(hdev, rp->phy_handle);
 }
 
 /* Write AMP Assoc data fragments, returns true with last fragment written */
@@ -270,6 +340,7 @@ static bool amp_write_rem_assoc_frag(struct hci_dev *hdev,
        struct hci_cp_write_remote_amp_assoc *cp;
        struct amp_mgr *mgr = hcon->amp_mgr;
        struct amp_ctrl *ctrl;
+       struct hci_request req;
        u16 frag_len, len;
 
        ctrl = amp_ctrl_lookup(mgr, hcon->remote_id);
@@ -307,7 +378,9 @@ static bool amp_write_rem_assoc_frag(struct hci_dev *hdev,
 
        amp_ctrl_put(ctrl);
 
-       hci_send_cmd(hdev, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
+       hci_req_init(&req, hdev);
+       hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
+       hci_req_run_skb(&req, write_remote_amp_assoc_complete);
 
        kfree(cp);
 
@@ -344,10 +417,37 @@ void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle)
        amp_write_rem_assoc_frag(hdev, hcon);
 }
 
+static void create_phylink_complete(struct hci_dev *hdev, u8 status,
+                                   u16 opcode)
+{
+       struct hci_cp_create_phy_link *cp;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+       cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
+       if (!cp)
+               return;
+
+       hci_dev_lock(hdev);
+
+       if (status) {
+               struct hci_conn *hcon;
+
+               hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
+               if (hcon)
+                       hci_conn_del(hcon);
+       } else {
+               amp_write_remote_assoc(hdev, cp->phy_handle);
+       }
+
+       hci_dev_unlock(hdev);
+}
+
 void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
                        struct hci_conn *hcon)
 {
        struct hci_cp_create_phy_link cp;
+       struct hci_request req;
 
        cp.phy_handle = hcon->handle;
 
@@ -360,13 +460,33 @@ void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
                return;
        }
 
-       hci_send_cmd(hdev, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
+       hci_req_init(&req, hdev);
+       hci_req_add(&req, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
+       hci_req_run(&req, create_phylink_complete);
+}
+
+static void accept_phylink_complete(struct hci_dev *hdev, u8 status,
+                                   u16 opcode)
+{
+       struct hci_cp_accept_phy_link *cp;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+       if (status)
+               return;
+
+       cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
+       if (!cp)
+               return;
+
+       amp_write_remote_assoc(hdev, cp->phy_handle);
 }
 
 void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
                        struct hci_conn *hcon)
 {
        struct hci_cp_accept_phy_link cp;
+       struct hci_request req;
 
        cp.phy_handle = hcon->handle;
 
@@ -379,7 +499,9 @@ void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
                return;
        }
 
-       hci_send_cmd(hdev, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
+       hci_req_init(&req, hdev);
+       hci_req_add(&req, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
+       hci_req_run(&req, accept_phylink_complete);
 }
 
 void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon)
index 7ea3db77ba890a18876fd3a57ddf6aa27fa9269c..8848f8158ae45d61013b5373ce4d882969178c25 100644 (file)
@@ -44,6 +44,20 @@ void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
                        struct hci_conn *hcon);
 void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
                        struct hci_conn *hcon);
+
+#if IS_ENABLED(CONFIG_BT_HS)
+void amp_create_logical_link(struct l2cap_chan *chan);
+void amp_disconnect_logical_link(struct hci_chan *hchan);
+#else
+static inline void amp_create_logical_link(struct l2cap_chan *chan)
+{
+}
+
+static inline void amp_disconnect_logical_link(struct hci_chan *hchan)
+{
+}
+#endif
+
 void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle);
 void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle);
 void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon);
index b0c6c6af76ef07c311ea940d482b3d45ab83696d..9a50338772f3af9875c83f628e110ffa6458781a 100644 (file)
@@ -100,9 +100,9 @@ static void cmtp_application_del(struct cmtp_session *session, struct cmtp_appli
 static struct cmtp_application *cmtp_application_get(struct cmtp_session *session, int pattern, __u16 value)
 {
        struct cmtp_application *app;
-       struct list_head *p, *n;
+       struct list_head *p;
 
-       list_for_each_safe(p, n, &session->applications) {
+       list_for_each(p, &session->applications) {
                app = list_entry(p, struct cmtp_application, list);
                switch (pattern) {
                case CMTP_MSGNUM:
@@ -511,13 +511,13 @@ static int cmtp_proc_show(struct seq_file *m, void *v)
        struct capi_ctr *ctrl = m->private;
        struct cmtp_session *session = ctrl->driverdata;
        struct cmtp_application *app;
-       struct list_head *p, *n;
+       struct list_head *p;
 
        seq_printf(m, "%s\n\n", cmtp_procinfo(ctrl));
        seq_printf(m, "addr %s\n", session->name);
        seq_printf(m, "ctrl %d\n", session->num);
 
-       list_for_each_safe(p, n, &session->applications) {
+       list_for_each(p, &session->applications) {
                app = list_entry(p, struct cmtp_application, list);
                seq_printf(m, "appl %d -> %d\n", app->appl, app->mapping);
        }
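
The change above drops the _safe iterators because neither loop deletes entries; the safe variant exists only to cache the next pointer so the current node can be freed mid-walk. A standalone illustration on a simplified singly linked list (not the kernel's list.h):

        #include <stdlib.h>

        struct node {
                struct node *next;
        };

        #define for_each(p, head)       for ((p) = (head); (p); (p) = (p)->next)
        #define for_each_safe(p, n, head) \
                for ((p) = (head), (n) = (p) ? (p)->next : NULL; (p); \
                     (p) = (n), (n) = (p) ? (p)->next : NULL)

        static int count(struct node *head)
        {
                struct node *p;
                int c = 0;

                for_each(p, head)               /* read-only: plain form suffices */
                        c++;
                return c;
        }

        static void free_all(struct node *head)
        {
                struct node *p, *n;

                for_each_safe(p, n, head)       /* deletes: safe form required */
                        free(p);
        }

        int main(void)
        {
                struct node *a = calloc(1, sizeof(*a));
                struct node *b = calloc(1, sizeof(*b));
                int c;

                a->next = b;
                c = count(a);
                free_all(a);
                return c == 2 ? 0 : 1;
        }
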
index 2f8fb33067e1c48fedf3055ddf29a3be68161c9f..bc43b6490555c7d75dae8ac452a7b012c8f1ffa1 100644 (file)
@@ -2822,10 +2822,6 @@ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
 {
        struct hci_conn_params *params;
 
-       /* The conn params list only contains identity addresses */
-       if (!hci_is_identity_address(addr, addr_type))
-               return NULL;
-
        list_for_each_entry(params, &hdev->le_conn_params, list) {
                if (bacmp(&params->addr, addr) == 0 &&
                    params->addr_type == addr_type) {
@@ -2842,10 +2838,6 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
 {
        struct hci_conn_params *param;
 
-       /* The list only contains identity addresses */
-       if (!hci_is_identity_address(addr, addr_type))
-               return NULL;
-
        list_for_each_entry(param, list, action) {
                if (bacmp(&param->addr, addr) == 0 &&
                    param->addr_type == addr_type)
@@ -2861,9 +2853,6 @@ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
 {
        struct hci_conn_params *params;
 
-       if (!hci_is_identity_address(addr, addr_type))
-               return NULL;
-
        params = hci_conn_params_lookup(hdev, addr, addr_type);
        if (params)
                return params;
index 32363c2b7f83d7b458eb303e2fbe7a8b050539b5..218d7dfc342f484b0b9b18c4208a2ccc5efc0cb8 100644 (file)
@@ -823,7 +823,7 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 
        if (rp->status)
-               goto a2mp_rsp;
+               return;
 
        hdev->amp_status = rp->amp_status;
        hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
@@ -835,46 +835,6 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
        hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
        hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
        hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
-
-a2mp_rsp:
-       a2mp_send_getinfo_rsp(hdev);
-}
-
-static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
-                                       struct sk_buff *skb)
-{
-       struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
-       struct amp_assoc *assoc = &hdev->loc_assoc;
-       size_t rem_len, frag_len;
-
-       BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
-
-       if (rp->status)
-               goto a2mp_rsp;
-
-       frag_len = skb->len - sizeof(*rp);
-       rem_len = __le16_to_cpu(rp->rem_len);
-
-       if (rem_len > frag_len) {
-               BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
-
-               memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
-               assoc->offset += frag_len;
-
-               /* Read other fragments */
-               amp_read_loc_assoc_frag(hdev, rp->phy_handle);
-
-               return;
-       }
-
-       memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
-       assoc->len = assoc->offset + rem_len;
-       assoc->offset = 0;
-
-a2mp_rsp:
-       /* Send A2MP Rsp when all fragments are received */
-       a2mp_send_getampassoc_rsp(hdev, rp->status);
-       a2mp_send_create_phy_link_req(hdev, rp->status);
 }
 
 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
@@ -1409,20 +1369,6 @@ static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
        hci_dev_unlock(hdev);
 }
 
-static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
-                                         struct sk_buff *skb)
-{
-       struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
-
-       BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
-              hdev->name, rp->status, rp->phy_handle);
-
-       if (rp->status)
-               return;
-
-       amp_write_rem_assoc_continue(hdev, rp->phy_handle);
-}
-
 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_rp_read_rssi *rp = (void *) skb->data;
@@ -1944,47 +1890,6 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
        hci_dev_unlock(hdev);
 }
 
-static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
-{
-       struct hci_cp_create_phy_link *cp;
-
-       BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
-       cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
-       if (!cp)
-               return;
-
-       hci_dev_lock(hdev);
-
-       if (status) {
-               struct hci_conn *hcon;
-
-               hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
-               if (hcon)
-                       hci_conn_del(hcon);
-       } else {
-               amp_write_remote_assoc(hdev, cp->phy_handle);
-       }
-
-       hci_dev_unlock(hdev);
-}
-
-static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
-{
-       struct hci_cp_accept_phy_link *cp;
-
-       BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
-       if (status)
-               return;
-
-       cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
-       if (!cp)
-               return;
-
-       amp_write_remote_assoc(hdev, cp->phy_handle);
-}
-
 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
 {
        struct hci_cp_le_create_conn *cp;
@@ -2998,10 +2903,6 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
                hci_cc_read_clock(hdev, skb);
                break;
 
-       case HCI_OP_READ_LOCAL_AMP_ASSOC:
-               hci_cc_read_local_amp_assoc(hdev, skb);
-               break;
-
        case HCI_OP_READ_INQ_RSP_TX_POWER:
                hci_cc_read_inq_rsp_tx_power(hdev, skb);
                break;
@@ -3106,10 +3007,6 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
                hci_cc_set_adv_param(hdev, skb);
                break;
 
-       case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
-               hci_cc_write_remote_amp_assoc(hdev, skb);
-               break;
-
        case HCI_OP_READ_RSSI:
                hci_cc_read_rssi(hdev, skb);
                break;
@@ -3193,14 +3090,6 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
                hci_cs_setup_sync_conn(hdev, ev->status);
                break;
 
-       case HCI_OP_CREATE_PHY_LINK:
-               hci_cs_create_phylink(hdev, ev->status);
-               break;
-
-       case HCI_OP_ACCEPT_PHY_LINK:
-               hci_cs_accept_phylink(hdev, ev->status);
-               break;
-
        case HCI_OP_SNIFF_MODE:
                hci_cs_sniff_mode(hdev, ev->status);
                break;
@@ -4399,6 +4288,23 @@ unlock:
        hci_dev_unlock(hdev);
 }
 
+#if IS_ENABLED(CONFIG_BT_HS)
+static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_ev_channel_selected *ev = (void *)skb->data;
+       struct hci_conn *hcon;
+
+       BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
+
+       skb_pull(skb, sizeof(*ev));
+
+       hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+       if (!hcon)
+               return;
+
+       amp_read_loc_assoc_final_data(hdev, hcon);
+}
+
 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
                                      struct sk_buff *skb)
 {
@@ -4522,6 +4428,7 @@ static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
 
        hci_dev_unlock(hdev);
 }
+#endif
 
 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
@@ -5206,22 +5113,6 @@ static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
        }
 }
 
-static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
-{
-       struct hci_ev_channel_selected *ev = (void *) skb->data;
-       struct hci_conn *hcon;
-
-       BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
-
-       skb_pull(skb, sizeof(*ev));
-
-       hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
-       if (!hcon)
-               return;
-
-       amp_read_loc_assoc_final_data(hdev, hcon);
-}
-
 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                 u8 event, struct sk_buff *skb)
 {
@@ -5442,14 +5333,15 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
                hci_le_meta_evt(hdev, skb);
                break;
 
-       case HCI_EV_CHANNEL_SELECTED:
-               hci_chan_selected_evt(hdev, skb);
-               break;
-
        case HCI_EV_REMOTE_OOB_DATA_REQUEST:
                hci_remote_oob_data_request_evt(hdev, skb);
                break;
 
+#if IS_ENABLED(CONFIG_BT_HS)
+       case HCI_EV_CHANNEL_SELECTED:
+               hci_chan_selected_evt(hdev, skb);
+               break;
+
        case HCI_EV_PHY_LINK_COMPLETE:
                hci_phy_link_complete_evt(hdev, skb);
                break;
@@ -5465,6 +5357,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
        case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
                hci_disconn_phylink_complete_evt(hdev, skb);
                break;
+#endif
 
        case HCI_EV_NUM_COMP_BLOCKS:
                hci_num_comp_blocks_evt(hdev, skb);
index 244287706f910bdaf69afaef98cf2706cd301654..586b3d580cfcba0422828cab1843363178dfe85c 100644 (file)
@@ -1054,18 +1054,23 @@ static void l2cap_sock_kill(struct sock *sk)
        sock_put(sk);
 }
 
-static int __l2cap_wait_ack(struct sock *sk)
+static int __l2cap_wait_ack(struct sock *sk, struct l2cap_chan *chan)
 {
-       struct l2cap_chan *chan = l2cap_pi(sk)->chan;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;
-       int timeo = HZ/5;
+       int timeo = L2CAP_WAIT_ACK_POLL_PERIOD;
+       /* Timeout to prevent infinite loop */
+       unsigned long timeout = jiffies + L2CAP_WAIT_ACK_TIMEOUT;
 
        add_wait_queue(sk_sleep(sk), &wait);
        set_current_state(TASK_INTERRUPTIBLE);
-       while (chan->unacked_frames > 0 && chan->conn) {
+       do {
+               BT_DBG("Waiting for %d ACKs, timeout %04d ms",
+                      chan->unacked_frames, time_after(jiffies, timeout) ? 0 :
+                      jiffies_to_msecs(timeout - jiffies));
+
                if (!timeo)
-                       timeo = HZ/5;
+                       timeo = L2CAP_WAIT_ACK_POLL_PERIOD;
 
                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
@@ -1080,7 +1085,15 @@ static int __l2cap_wait_ack(struct sock *sk)
                err = sock_error(sk);
                if (err)
                        break;
-       }
+
+               if (time_after(jiffies, timeout)) {
+                       err = -ENOLINK;
+                       break;
+               }
+
+       } while (chan->unacked_frames > 0 &&
+                chan->state == BT_CONNECTED);
+
        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(sk), &wait);
        return err;
@@ -1098,7 +1111,12 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
        if (!sk)
                return 0;
 
+       /* prevent sk structure from being freed whilst unlocked */
+       sock_hold(sk);
+
        chan = l2cap_pi(sk)->chan;
+       /* prevent chan structure from being freed whilst unlocked */
+       l2cap_chan_hold(chan);
        conn = chan->conn;
 
        BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
@@ -1110,8 +1128,10 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
        lock_sock(sk);
 
        if (!sk->sk_shutdown) {
-               if (chan->mode == L2CAP_MODE_ERTM)
-                       err = __l2cap_wait_ack(sk);
+               if (chan->mode == L2CAP_MODE_ERTM &&
+                   chan->unacked_frames > 0 &&
+                   chan->state == BT_CONNECTED)
+                       err = __l2cap_wait_ack(sk, chan);
 
                sk->sk_shutdown = SHUTDOWN_MASK;
 
@@ -1134,6 +1154,11 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
        if (conn)
                mutex_unlock(&conn->chan_lock);
 
+       l2cap_chan_put(chan);
+       sock_put(sk);
+
+       BT_DBG("err: %d", err);
+
        return err;
 }
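
The reworked __l2cap_wait_ack() above bounds the wait twice over: each sleep is capped by a poll period and the whole loop by an absolute deadline, so a peer that never acknowledges yields -ENOLINK instead of an infinite loop. A user-space sketch of the same shape (hypothetical names, POSIX clocks in place of jiffies):

        #include <errno.h>
        #include <stdbool.h>
        #include <time.h>

        static bool acked(void)
        {
                return false;           /* stand-in for unacked_frames == 0 */
        }

        static int wait_for_acks(long poll_ms, long total_ms)
        {
                struct timespec poll = { poll_ms / 1000,
                                         (poll_ms % 1000) * 1000000L };
                struct timespec deadline;

                clock_gettime(CLOCK_MONOTONIC, &deadline);
                deadline.tv_sec += total_ms / 1000;
                deadline.tv_nsec += (total_ms % 1000) * 1000000L;
                if (deadline.tv_nsec >= 1000000000L) {
                        deadline.tv_sec++;
                        deadline.tv_nsec -= 1000000000L;
                }

                do {
                        struct timespec now;

                        if (acked())
                                return 0;
                        nanosleep(&poll, NULL);         /* bounded poll period */
                        clock_gettime(CLOCK_MONOTONIC, &now);
                        if (now.tv_sec > deadline.tv_sec ||
                            (now.tv_sec == deadline.tv_sec &&
                             now.tv_nsec >= deadline.tv_nsec))
                                return -ETIMEDOUT;      /* like -ENOLINK above */
                } while (1);
        }

        int main(void)
        {
                return wait_for_acks(200, 1000) == -ETIMEDOUT ? 0 : 1;
        }
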
 
index 7998fb27916568da087b2734a017355158044a75..7ab191589541c8fab56d47ab9f0e21f050751de9 100644 (file)
@@ -6226,6 +6226,17 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
        else
                auto_conn = HCI_AUTO_CONN_REPORT;
 
+       /* Kernel internally uses conn_params with resolvable private
+        * address, but Add Device allows only identity addresses.
+        * Make sure it is enforced before calling
+        * hci_conn_params_lookup.
+        */
+       if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
+               err = cmd->cmd_complete(cmd, MGMT_STATUS_INVALID_PARAMS);
+               mgmt_pending_remove(cmd);
+               goto unlock;
+       }
+
        /* If the connection parameters don't exist for this device,
         * they will be created and configured with defaults.
         */
@@ -6340,6 +6351,18 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
                else
                        addr_type = ADDR_LE_DEV_RANDOM;
 
+               /* Kernel internally uses conn_params with resolvable private
+                * address, but Remove Device allows only identity addresses.
+                * Make sure it is enforced before calling
+                * hci_conn_params_lookup.
+                */
+               if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
+                       err = cmd->cmd_complete(cmd,
+                                               MGMT_STATUS_INVALID_PARAMS);
+                       mgmt_pending_remove(cmd);
+                       goto unlock;
+               }
+
                params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
                                                addr_type);
                if (!params) {
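
The two hunks above re-add the identity-address check that the earlier hci_core.c hunk removed from the lookup helpers, enforcing it once at the management-command boundary instead. For reference, a standalone sketch of what the predicate tests, mirroring hci_is_identity_address() with simplified types: an LE identity address is either public or static random, and static random addresses carry the two most significant address bits set.

        #include <stdbool.h>
        #include <stdint.h>

        #define ADDR_LE_DEV_PUBLIC 0x01
        #define ADDR_LE_DEV_RANDOM 0x02

        typedef struct {
                uint8_t b[6];           /* little-endian; b[5] is the MSB */
        } bdaddr_t;

        static bool is_identity_address(const bdaddr_t *addr, uint8_t addr_type)
        {
                if (addr_type == ADDR_LE_DEV_PUBLIC)
                        return true;

                /* Static random: top two bits 11. Resolvable (01) and
                 * non-resolvable (00) private addresses are rejected. */
                return (addr->b[5] & 0xc0) == 0xc0;
        }

        int main(void)
        {
                bdaddr_t rpa = { .b = { 0, 0, 0, 0, 0, 0x40 } };
                bdaddr_t sta = { .b = { 0, 0, 0, 0, 0, 0xc0 } };

                return (!is_identity_address(&rpa, ADDR_LE_DEV_RANDOM) &&
                        is_identity_address(&sta, ADDR_LE_DEV_RANDOM)) ? 0 : 1;
        }
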
index 3d0f7d2a06162248a1e6589fe3d59022f9dded0d..ad82324f710f0f97427a51c632e5a8aebfaba292 100644 (file)
@@ -2312,6 +2312,10 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
                return 1;
 
        chan = conn->smp;
+       if (!chan) {
+               BT_ERR("SMP security requested but not available");
+               return 1;
+       }
 
        if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
                return 1;
index 4ff77a16956c2740cdcae2c1a1e5a38209a7a6c9..0aa8f5cf46a17171c627e6949c51e684f28a58ed 100644 (file)
@@ -339,6 +339,7 @@ static const struct net_device_ops br_netdev_ops = {
        .ndo_bridge_getlink      = br_getlink,
        .ndo_bridge_setlink      = br_setlink,
        .ndo_bridge_dellink      = br_dellink,
+       .ndo_features_check      = passthru_features_check,
 };
 
 static void br_dev_free(struct net_device *dev)
index 0ff6e1bbca910a35fc94794d39ac8978085b144d..fa7bfced888ec0f3b7d8a5ceff17b597cbe9b79e 100644 (file)
@@ -37,15 +37,30 @@ static inline int should_deliver(const struct net_bridge_port *p,
 
 int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb)
 {
-       if (!is_skb_forwardable(skb->dev, skb)) {
-               kfree_skb(skb);
-       } else {
-               skb_push(skb, ETH_HLEN);
-               br_drop_fake_rtable(skb);
-               skb_sender_cpu_clear(skb);
-               dev_queue_xmit(skb);
+       if (!is_skb_forwardable(skb->dev, skb))
+               goto drop;
+
+       skb_push(skb, ETH_HLEN);
+       br_drop_fake_rtable(skb);
+       skb_sender_cpu_clear(skb);
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL &&
+           (skb->protocol == htons(ETH_P_8021Q) ||
+            skb->protocol == htons(ETH_P_8021AD))) {
+               int depth;
+
+               if (!__vlan_get_protocol(skb, skb->protocol, &depth))
+                       goto drop;
+
+               skb_set_network_header(skb, depth);
        }
 
+       dev_queue_xmit(skb);
+
+       return 0;
+
+drop:
+       kfree_skb(skb);
        return 0;
 }
 EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
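
The checksum fix above matters because with CHECKSUM_PARTIAL the device locates the L3 header through the network-header offset, and every VLAN tag pushes that offset 4 bytes past the bare 14-byte Ethernet header. A trivial standalone check of the arithmetic:

        #include <assert.h>

        #define ETH_HLEN  14
        #define VLAN_HLEN 4

        static int l3_offset(int nr_vlan_tags)
        {
                return ETH_HLEN + nr_vlan_tags * VLAN_HLEN;
        }

        int main(void)
        {
                assert(l3_offset(0) == 14);     /* untagged */
                assert(l3_offset(1) == 18);     /* 802.1Q */
                assert(l3_offset(2) == 22);     /* 802.1ad / QinQ */
                return 0;
        }
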
index a538cb1199a3087673790272b61c57a1f2630651..45e4757c6fd25ed05a14e24e2d026204c3e79506 100644 (file)
@@ -281,6 +281,7 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)
        br_fdb_delete_by_port(br, NULL, 0, 1);
 
        br_vlan_flush(br);
+       br_multicast_dev_del(br);
        del_timer_sync(&br->gc_timer);
 
        br_sysfs_delbr(br->dev);
index c11cf2611db0c870542969b6847d0a61d18b64d4..d747275fad18826f96c604505f0583049c456f91 100644 (file)
@@ -85,6 +85,7 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
                                        memset(&e, 0, sizeof(e));
                                        e.ifindex = port->dev->ifindex;
                                        e.state = p->state;
+                                       e.vid = p->addr.vid;
                                        if (p->addr.proto == htons(ETH_P_IP))
                                                e.addr.u.ip4 = p->addr.u.ip4;
 #if IS_ENABLED(CONFIG_IPV6)
@@ -230,7 +231,7 @@ errout:
 }
 
 void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
-                  struct br_ip *group, int type)
+                  struct br_ip *group, int type, u8 state)
 {
        struct br_mdb_entry entry;
 
@@ -241,9 +242,78 @@ void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
 #if IS_ENABLED(CONFIG_IPV6)
        entry.addr.u.ip6 = group->u.ip6;
 #endif
+       entry.state = state;
+       entry.vid = group->vid;
        __br_mdb_notify(dev, &entry, type);
 }
 
+static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
+                                  struct net_device *dev,
+                                  int ifindex, u32 pid,
+                                  u32 seq, int type, unsigned int flags)
+{
+       struct br_port_msg *bpm;
+       struct nlmsghdr *nlh;
+       struct nlattr *nest;
+
+       nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
+       if (!nlh)
+               return -EMSGSIZE;
+
+       bpm = nlmsg_data(nlh);
+       memset(bpm, 0, sizeof(*bpm));
+       bpm->family = AF_BRIDGE;
+       bpm->ifindex = dev->ifindex;
+       nest = nla_nest_start(skb, MDBA_ROUTER);
+       if (!nest)
+               goto cancel;
+
+       if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
+               goto end;
+
+       nla_nest_end(skb, nest);
+       nlmsg_end(skb, nlh);
+       return 0;
+
+end:
+       nla_nest_end(skb, nest);
+cancel:
+       nlmsg_cancel(skb, nlh);
+       return -EMSGSIZE;
+}
+
+static inline size_t rtnl_rtr_nlmsg_size(void)
+{
+       return NLMSG_ALIGN(sizeof(struct br_port_msg))
+               + nla_total_size(sizeof(__u32));
+}
+
+void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
+                  int type)
+{
+       struct net *net = dev_net(dev);
+       struct sk_buff *skb;
+       int err = -ENOBUFS;
+       int ifindex;
+
+       ifindex = port ? port->dev->ifindex : 0;
+       skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
+       if (!skb)
+               goto errout;
+
+       err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
+       if (err < 0) {
+               kfree_skb(skb);
+               goto errout;
+       }
+
+       rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
+       return;
+
+errout:
+       rtnl_set_sk_err(net, RTNLGRP_MDB, err);
+}
+
 static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
 {
        if (entry->ifindex == 0)
@@ -263,6 +333,8 @@ static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
                return false;
        if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
                return false;
+       if (entry->vid >= VLAN_VID_MASK)
+               return false;
 
        return true;
 }
@@ -351,7 +423,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
        if (state == MDB_TEMPORARY)
                mod_timer(&p->timer, now + br->multicast_membership_interval);
 
-       br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
        return 0;
 }
 
@@ -375,6 +446,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
                return -EINVAL;
 
        memset(&ip, 0, sizeof(ip));
+       ip.vid = entry->vid;
        ip.proto = entry->addr.proto;
        if (ip.proto == htons(ETH_P_IP))
                ip.u.ip4 = entry->addr.u.ip4;
@@ -392,8 +464,11 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
 static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
        struct net *net = sock_net(skb->sk);
+       unsigned short vid = VLAN_N_VID;
+       struct net_device *dev, *pdev;
        struct br_mdb_entry *entry;
-       struct net_device *dev;
+       struct net_bridge_port *p;
+       struct net_port_vlans *pv;
        struct net_bridge *br;
        int err;
 
@@ -403,9 +478,32 @@ static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
 
        br = netdev_priv(dev);
 
-       err = __br_mdb_add(net, br, entry);
-       if (!err)
-               __br_mdb_notify(dev, entry, RTM_NEWMDB);
+       /* If vlan filtering is enabled and VLAN is not specified
+        * install mdb entry on all vlans configured on the port.
+        */
+       pdev = __dev_get_by_index(net, entry->ifindex);
+       if (!pdev)
+               return -ENODEV;
+
+       p = br_port_get_rtnl(pdev);
+       if (!p || p->br != br || p->state == BR_STATE_DISABLED)
+               return -EINVAL;
+
+       pv = nbp_get_vlan_info(p);
+       if (br_vlan_enabled(br) && pv && entry->vid == 0) {
+               for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+                       entry->vid = vid;
+                       err = __br_mdb_add(net, br, entry);
+                       if (err)
+                               break;
+                       __br_mdb_notify(dev, entry, RTM_NEWMDB);
+               }
+       } else {
+               err = __br_mdb_add(net, br, entry);
+               if (!err)
+                       __br_mdb_notify(dev, entry, RTM_NEWMDB);
+       }
+
        return err;
 }
 
@@ -422,6 +520,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
                return -EINVAL;
 
        memset(&ip, 0, sizeof(ip));
+       ip.vid = entry->vid;
        ip.proto = entry->addr.proto;
        if (ip.proto == htons(ETH_P_IP))
                ip.u.ip4 = entry->addr.u.ip4;
@@ -446,6 +545,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
                if (p->port->state == BR_STATE_DISABLED)
                        goto unlock;
 
+               entry->state = p->state;
                rcu_assign_pointer(*pp, p->next);
                hlist_del_init(&p->mglist);
                del_timer(&p->timer);
@@ -465,8 +565,12 @@ unlock:
 
 static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
-       struct net_device *dev;
+       struct net *net = sock_net(skb->sk);
+       unsigned short vid = VLAN_N_VID;
+       struct net_device *dev, *pdev;
        struct br_mdb_entry *entry;
+       struct net_bridge_port *p;
+       struct net_port_vlans *pv;
        struct net_bridge *br;
        int err;
 
@@ -476,9 +580,31 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
 
        br = netdev_priv(dev);
 
-       err = __br_mdb_del(br, entry);
-       if (!err)
-               __br_mdb_notify(dev, entry, RTM_DELMDB);
+       /* If vlan filtering is enabled and VLAN is not specified
+        * delete mdb entry on all vlans configured on the port.
+        */
+       pdev = __dev_get_by_index(net, entry->ifindex);
+       if (!pdev)
+               return -ENODEV;
+
+       p = br_port_get_rtnl(pdev);
+       if (!p || p->br != br || p->state == BR_STATE_DISABLED)
+               return -EINVAL;
+
+       pv = nbp_get_vlan_info(p);
+       if (br_vlan_enabled(br) && pv && entry->vid == 0) {
+               for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+                       entry->vid = vid;
+                       err = __br_mdb_del(br, entry);
+                       if (!err)
+                               __br_mdb_notify(dev, entry, RTM_DELMDB);
+               }
+       } else {
+               err = __br_mdb_del(br, entry);
+               if (!err)
+                       __br_mdb_notify(dev, entry, RTM_DELMDB);
+       }
+
        return err;
 }
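
The symmetric add/del hunks above interpret a vid of 0 as "apply to every VLAN configured on the port" when VLAN filtering is enabled. A standalone sketch of the fan-out, with a byte-array bitmap standing in for the port's vlan_bitmap:

        #include <stdio.h>

        #define VLAN_N_VID 4096

        static unsigned char vlan_bitmap[VLAN_N_VID / 8];

        static void vid_set(int vid)
        {
                vlan_bitmap[vid / 8] |= 1 << (vid % 8);
        }

        static int vid_test(int vid)
        {
                return vlan_bitmap[vid / 8] & (1 << (vid % 8));
        }

        static void add_entry(int vid)
        {
                printf("mdb entry added, vid %d\n", vid);
        }

        static void mdb_add(int vid)
        {
                if (vid == 0) {                 /* unspecified: all VLANs */
                        for (int v = 1; v < VLAN_N_VID; v++)
                                if (vid_test(v))
                                        add_entry(v);
                } else {
                        add_entry(vid);
                }
        }

        int main(void)
        {
                vid_set(10);
                vid_set(20);
                mdb_add(0);                     /* fans out to vids 10 and 20 */
                return 0;
        }
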
 
index 742a6c27d7a222bc3c53c288b4a6915194310fe6..0752796fe0ba4443036a94a385ef4d1666cd3adc 100644 (file)
@@ -39,6 +39,16 @@ static void br_multicast_start_querier(struct net_bridge *br,
                                       struct bridge_mcast_own_query *query);
 static void br_multicast_add_router(struct net_bridge *br,
                                    struct net_bridge_port *port);
+static void br_ip4_multicast_leave_group(struct net_bridge *br,
+                                        struct net_bridge_port *port,
+                                        __be32 group,
+                                        __u16 vid);
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_leave_group(struct net_bridge *br,
+                                        struct net_bridge_port *port,
+                                        const struct in6_addr *group,
+                                        __u16 vid);
+#endif
 unsigned int br_mdb_rehash_seq;
 
 static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
@@ -273,6 +283,8 @@ static void br_multicast_del_pg(struct net_bridge *br,
                rcu_assign_pointer(*pp, p->next);
                hlist_del_init(&p->mglist);
                del_timer(&p->timer);
+               br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
+                             p->state);
                call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
                if (!mp->ports && !mp->mglist &&
@@ -694,7 +706,7 @@ static int br_multicast_add_group(struct net_bridge *br,
        if (unlikely(!p))
                goto err;
        rcu_assign_pointer(*pp, p);
-       br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
+       br_mdb_notify(br->dev, port, group, RTM_NEWMDB, MDB_TEMPORARY);
 
 found:
        mod_timer(&p->timer, now + br->multicast_membership_interval);
@@ -754,6 +766,7 @@ static void br_multicast_router_expired(unsigned long data)
                goto out;
 
        hlist_del_init_rcu(&port->rlist);
+       br_rtr_notify(br->dev, port, RTM_DELMDB);
 
 out:
        spin_unlock(&br->multicast_lock);
@@ -914,6 +927,15 @@ void br_multicast_add_port(struct net_bridge_port *port)
 
 void br_multicast_del_port(struct net_bridge_port *port)
 {
+       struct net_bridge *br = port->br;
+       struct net_bridge_port_group *pg;
+       struct hlist_node *n;
+
+       /* Take care of the remaining groups, only perm ones should be left */
+       spin_lock_bh(&br->multicast_lock);
+       hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
+               br_multicast_del_pg(br, pg);
+       spin_unlock_bh(&br->multicast_lock);
        del_timer_sync(&port->multicast_router_timer);
 }
 
@@ -953,10 +975,13 @@ void br_multicast_disable_port(struct net_bridge_port *port)
 
        spin_lock(&br->multicast_lock);
        hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
-               br_multicast_del_pg(br, pg);
+               if (pg->state == MDB_TEMPORARY)
+                       br_multicast_del_pg(br, pg);
 
-       if (!hlist_unhashed(&port->rlist))
+       if (!hlist_unhashed(&port->rlist)) {
                hlist_del_init_rcu(&port->rlist);
+               br_rtr_notify(br->dev, port, RTM_DELMDB);
+       }
        del_timer(&port->multicast_router_timer);
        del_timer(&port->ip4_own_query.timer);
 #if IS_ENABLED(CONFIG_IPV6)
@@ -1010,9 +1035,15 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
                        continue;
                }
 
-               err = br_ip4_multicast_add_group(br, port, group, vid);
-               if (err)
-                       break;
+               if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
+                    type == IGMPV3_MODE_IS_INCLUDE) &&
+                   ntohs(grec->grec_nsrcs) == 0) {
+                       br_ip4_multicast_leave_group(br, port, group, vid);
+               } else {
+                       err = br_ip4_multicast_add_group(br, port, group, vid);
+                       if (err)
+                               break;
+               }
        }
 
        return err;
@@ -1071,10 +1102,17 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
                        continue;
                }
 
-               err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
-                                                vid);
-               if (err)
-                       break;
+               if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
+                    grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
+                   ntohs(*nsrcs) == 0) {
+                       br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
+                                                    vid);
+               } else {
+                       err = br_ip6_multicast_add_group(br, port,
+                                                        &grec->grec_mca, vid);
+                       if (err)
+                               break;
+               }
        }
 
        return err;
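
Both report parsers above now treat a CHANGE_TO_INCLUDE or MODE_IS_INCLUDE record with an empty source list as a leave, which is what such a record means in IGMPv3 (RFC 3376) and MLDv2 (RFC 3810): the host asks for traffic from no sources at all. A standalone sketch of the rule, with record-type values as in RFC 3376:

        #include <stdbool.h>
        #include <stdio.h>

        enum rec_type {
                MODE_IS_INCLUDE   = 1,
                MODE_IS_EXCLUDE   = 2,
                CHANGE_TO_INCLUDE = 3,
                CHANGE_TO_EXCLUDE = 4,
        };

        static bool record_is_leave(enum rec_type type, unsigned int nsrcs)
        {
                return (type == CHANGE_TO_INCLUDE || type == MODE_IS_INCLUDE) &&
                       nsrcs == 0;
        }

        int main(void)
        {
                printf("%d\n", record_is_leave(CHANGE_TO_INCLUDE, 0)); /* 1: leave */
                printf("%d\n", record_is_leave(CHANGE_TO_EXCLUDE, 0)); /* 0: join  */
                return 0;
        }
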
@@ -1181,6 +1219,7 @@ static void br_multicast_add_router(struct net_bridge *br,
                hlist_add_behind_rcu(&port->rlist, slot);
        else
                hlist_add_head_rcu(&port->rlist, &br->router_list);
+       br_rtr_notify(br->dev, port, RTM_NEWMDB);
 }
 
 static void br_multicast_mark_router(struct net_bridge *br,
@@ -1393,8 +1432,7 @@ br_multicast_leave_group(struct net_bridge *br,
 
        spin_lock(&br->multicast_lock);
        if (!netif_running(br->dev) ||
-           (port && port->state == BR_STATE_DISABLED) ||
-           timer_pending(&other_query->timer))
+           (port && port->state == BR_STATE_DISABLED))
                goto out;
 
        mdb = mlock_dereference(br->mdb, br);
@@ -1402,6 +1440,32 @@ br_multicast_leave_group(struct net_bridge *br,
        if (!mp)
                goto out;
 
+       if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
+               struct net_bridge_port_group __rcu **pp;
+
+               for (pp = &mp->ports;
+                    (p = mlock_dereference(*pp, br)) != NULL;
+                    pp = &p->next) {
+                       if (p->port != port)
+                               continue;
+
+                       rcu_assign_pointer(*pp, p->next);
+                       hlist_del_init(&p->mglist);
+                       del_timer(&p->timer);
+                       call_rcu_bh(&p->rcu, br_multicast_free_pg);
+                       br_mdb_notify(br->dev, port, group, RTM_DELMDB,
+                                     p->state);
+
+                       if (!mp->ports && !mp->mglist &&
+                           netif_running(br->dev))
+                               mod_timer(&mp->timer, jiffies);
+               }
+               goto out;
+       }
+
+       if (timer_pending(&other_query->timer))
+               goto out;
+
        if (br->multicast_querier) {
                __br_multicast_send_query(br, port, &mp->addr);
 
@@ -1427,28 +1491,6 @@ br_multicast_leave_group(struct net_bridge *br,
                }
        }
 
-       if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
-               struct net_bridge_port_group __rcu **pp;
-
-               for (pp = &mp->ports;
-                    (p = mlock_dereference(*pp, br)) != NULL;
-                    pp = &p->next) {
-                       if (p->port != port)
-                               continue;
-
-                       rcu_assign_pointer(*pp, p->next);
-                       hlist_del_init(&p->mglist);
-                       del_timer(&p->timer);
-                       call_rcu_bh(&p->rcu, br_multicast_free_pg);
-                       br_mdb_notify(br->dev, port, group, RTM_DELMDB);
-
-                       if (!mp->ports && !mp->mglist &&
-                           netif_running(br->dev))
-                               mod_timer(&mp->timer, jiffies);
-               }
-               goto out;
-       }
-
        now = jiffies;
        time = now + br->multicast_last_member_count *
                     br->multicast_last_member_interval;
@@ -1729,12 +1771,6 @@ void br_multicast_open(struct net_bridge *br)
 
 void br_multicast_stop(struct net_bridge *br)
 {
-       struct net_bridge_mdb_htable *mdb;
-       struct net_bridge_mdb_entry *mp;
-       struct hlist_node *n;
-       u32 ver;
-       int i;
-
        del_timer_sync(&br->multicast_router_timer);
        del_timer_sync(&br->ip4_other_query.timer);
        del_timer_sync(&br->ip4_own_query.timer);
@@ -1742,6 +1778,15 @@ void br_multicast_stop(struct net_bridge *br)
        del_timer_sync(&br->ip6_other_query.timer);
        del_timer_sync(&br->ip6_own_query.timer);
 #endif
+}
+
+void br_multicast_dev_del(struct net_bridge *br)
+{
+       struct net_bridge_mdb_htable *mdb;
+       struct net_bridge_mdb_entry *mp;
+       struct hlist_node *n;
+       u32 ver;
+       int i;
 
        spin_lock_bh(&br->multicast_lock);
        mdb = mlock_dereference(br->mdb, br);
@@ -1809,8 +1854,10 @@ int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
                p->multicast_router = val;
                err = 0;
 
-               if (val < 2 && !hlist_unhashed(&p->rlist))
+               if (val < 2 && !hlist_unhashed(&p->rlist)) {
                        hlist_del_init_rcu(&p->rlist);
+                       br_rtr_notify(br->dev, p, RTM_DELMDB);
+               }
 
                if (val == 1)
                        break;
index c8b9bcfe997e48556cfc2633981e272e063f6880..0a6f095bb0c9eef4cc6e6b543db0b0ba3967d67d 100644 (file)
@@ -49,9 +49,9 @@ static struct ctl_table_header *brnf_sysctl_header;
 static int brnf_call_iptables __read_mostly = 1;
 static int brnf_call_ip6tables __read_mostly = 1;
 static int brnf_call_arptables __read_mostly = 1;
-static int brnf_filter_vlan_tagged __read_mostly = 0;
-static int brnf_filter_pppoe_tagged __read_mostly = 0;
-static int brnf_pass_vlan_indev __read_mostly = 0;
+static int brnf_filter_vlan_tagged __read_mostly;
+static int brnf_filter_pppoe_tagged __read_mostly;
+static int brnf_pass_vlan_indev __read_mostly;
 #else
 #define brnf_call_iptables 1
 #define brnf_call_ip6tables 1
@@ -284,7 +284,7 @@ int br_nf_pre_routing_finish_bridge(struct sock *sk, struct sk_buff *skb)
                                                         nf_bridge->neigh_header,
                                                         ETH_HLEN-ETH_ALEN);
                        /* tell br_dev_xmit to continue with forwarding */
-                       nf_bridge->mask |= BRNF_BRIDGED_DNAT;
+                       nf_bridge->bridged_dnat = 1;
                        /* FIXME Need to refragment */
                        ret = neigh->output(neigh, skb);
                }
@@ -356,7 +356,7 @@ static int br_nf_pre_routing_finish(struct sock *sk, struct sk_buff *skb)
                skb->pkt_type = PACKET_OTHERHOST;
                nf_bridge->pkt_otherhost = false;
        }
-       nf_bridge->mask &= ~BRNF_NF_BRIDGE_PREROUTING;
+       nf_bridge->in_prerouting = 0;
        if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) {
                if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
                        struct in_device *in_dev = __in_dev_get_rcu(dev);
@@ -444,7 +444,7 @@ struct net_device *setup_pre_routing(struct sk_buff *skb)
                nf_bridge->pkt_otherhost = true;
        }
 
-       nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
+       nf_bridge->in_prerouting = 1;
        nf_bridge->physindev = skb->dev;
        skb->dev = brnf_get_logical_dev(skb, skb->dev);
 
@@ -850,10 +850,8 @@ static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops,
                                   struct sk_buff *skb,
                                   const struct nf_hook_state *state)
 {
-       if (skb->nf_bridge &&
-           !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
+       if (skb->nf_bridge && !skb->nf_bridge->in_prerouting)
                return NF_STOP;
-       }
 
        return NF_ACCEPT;
 }
@@ -872,7 +870,7 @@ static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
        struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
 
        skb_pull(skb, ETH_HLEN);
-       nf_bridge->mask &= ~BRNF_BRIDGED_DNAT;
+       nf_bridge->bridged_dnat = 0;
 
        BUILD_BUG_ON(sizeof(nf_bridge->neigh_header) != (ETH_HLEN - ETH_ALEN));
 
@@ -887,7 +885,7 @@ static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
 
 static int br_nf_dev_xmit(struct sk_buff *skb)
 {
-       if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
+       if (skb->nf_bridge && skb->nf_bridge->bridged_dnat) {
                br_nf_pre_routing_finish_bridge_slow(skb);
                return 1;
        }
index 13b7d1e3d1850e9aa408b55287c8ad72c950081a..77383bfe7ea38f888ce67619d03d1b7856bab84b 100644 (file)
@@ -174,7 +174,7 @@ static int br_nf_pre_routing_finish_ipv6(struct sock *sk, struct sk_buff *skb)
                skb->pkt_type = PACKET_OTHERHOST;
                nf_bridge->pkt_otherhost = false;
        }
-       nf_bridge->mask &= ~BRNF_NF_BRIDGE_PREROUTING;
+       nf_bridge->in_prerouting = 0;
        if (br_nf_ipv6_daddr_was_changed(skb, nf_bridge)) {
                skb_dst_drop(skb);
                v6ops->route_input(skb);
index 364bdc98bd9bef003dfe4f17a1f2ac3048c0bd02..91a2e08c2bb84546fb2ab7ac2bd7974e2dcb999a 100644 (file)
@@ -164,8 +164,6 @@ static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
                            sizeof(vinfo), &vinfo))
                        goto nla_put_failure;
 
-               vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
-
                vinfo.vid = vid_end;
                vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
                if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
@@ -693,9 +691,17 @@ static int br_port_slave_changelink(struct net_device *brdev,
                                    struct nlattr *tb[],
                                    struct nlattr *data[])
 {
+       struct net_bridge *br = netdev_priv(brdev);
+       int ret;
+
        if (!data)
                return 0;
-       return br_setport(br_port_get_rtnl(dev), data);
+
+       spin_lock_bh(&br->lock);
+       ret = br_setport(br_port_get_rtnl(dev), data);
+       spin_unlock_bh(&br->lock);
+
+       return ret;
 }
 
 static int br_port_fill_slave_info(struct sk_buff *skb,
index 8b21146b24a055652be0c7d74fd875ed875da918..e2cb359f9dd3279be534cb24d9f0a28cbcc3bf47 100644 (file)
@@ -466,6 +466,7 @@ void br_multicast_disable_port(struct net_bridge_port *port);
 void br_multicast_init(struct net_bridge *br);
 void br_multicast_open(struct net_bridge *br);
 void br_multicast_stop(struct net_bridge *br);
+void br_multicast_dev_del(struct net_bridge *br);
 void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
                          struct sk_buff *skb);
 void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
@@ -488,7 +489,9 @@ br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
 void br_mdb_init(void);
 void br_mdb_uninit(void);
 void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
-                  struct br_ip *group, int type);
+                  struct br_ip *group, int type, u8 state);
+void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
+                  int type);
 
 #define mlock_dereference(X, br) \
        rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
@@ -565,6 +568,10 @@ static inline void br_multicast_stop(struct net_bridge *br)
 {
 }
 
+static inline void br_multicast_dev_del(struct net_bridge *br)
+{
+}
+
 static inline void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
                                        struct sk_buff *skb)
 {
index b4b6dab9c2859730d3ba9b5bc17f8302ddca6510..ed74ffaa851ff43d08c3edb4a158ab21af27e1a3 100644 (file)
@@ -209,8 +209,9 @@ void br_transmit_config(struct net_bridge_port *p)
                br_send_config_bpdu(p, &bpdu);
                p->topology_change_ack = 0;
                p->config_pending = 0;
-               mod_timer(&p->hold_timer,
-                         round_jiffies(jiffies + BR_HOLD_TIME));
+               if (p->br->stp_enabled == BR_KERNEL_STP)
+                       mod_timer(&p->hold_timer,
+                                 round_jiffies(jiffies + BR_HOLD_TIME));
        }
 }
 
index a2730e7196cd7080b96a441f5cc40591320862e5..4ca449a161320f7ef1c6f4864940e8557a7d18e3 100644 (file)
@@ -48,7 +48,8 @@ void br_stp_enable_bridge(struct net_bridge *br)
        struct net_bridge_port *p;
 
        spin_lock_bh(&br->lock);
-       mod_timer(&br->hello_timer, jiffies + br->hello_time);
+       if (br->stp_enabled == BR_KERNEL_STP)
+               mod_timer(&br->hello_timer, jiffies + br->hello_time);
        mod_timer(&br->gc_timer, jiffies + HZ/10);
 
        br_config_bpdu_generation(br);
@@ -127,6 +128,7 @@ static void br_stp_start(struct net_bridge *br)
        int r;
        char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL };
        char *envp[] = { NULL };
+       struct net_bridge_port *p;
 
        r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
 
@@ -140,6 +142,10 @@ static void br_stp_start(struct net_bridge *br)
        if (r == 0) {
                br->stp_enabled = BR_USER_STP;
                br_debug(br, "userspace STP started\n");
+               /* Stop hello and hold timers */
+               del_timer(&br->hello_timer);
+               list_for_each_entry(p, &br->port_list, list)
+                       del_timer(&p->hold_timer);
        } else {
                br->stp_enabled = BR_KERNEL_STP;
                br_debug(br, "using kernel STP\n");
@@ -156,12 +162,17 @@ static void br_stp_stop(struct net_bridge *br)
        int r;
        char *argv[] = { BR_STP_PROG, br->dev->name, "stop", NULL };
        char *envp[] = { NULL };
+       struct net_bridge_port *p;
 
        if (br->stp_enabled == BR_USER_STP) {
                r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
                br_info(br, "userspace STP stopped, return code %d\n", r);
 
                /* To start timers on any ports left in blocking */
+               mod_timer(&br->hello_timer, jiffies + br->hello_time);
+               list_for_each_entry(p, &br->port_list, list)
+                       mod_timer(&p->hold_timer,
+                                 round_jiffies(jiffies + BR_HOLD_TIME));
                spin_lock_bh(&br->lock);
                br_port_state_selection(br);
                spin_unlock_bh(&br->lock);
index 7caf7fae2d5b8aa369b924e1c87a47c343fb8954..5f0f5af0ec35bf8c216935713a9d5f803456e1ab 100644 (file)
@@ -40,7 +40,9 @@ static void br_hello_timer_expired(unsigned long arg)
        if (br->dev->flags & IFF_UP) {
                br_config_bpdu_generation(br);
 
-               mod_timer(&br->hello_timer, round_jiffies(jiffies + br->hello_time));
+               if (br->stp_enabled != BR_USER_STP)
+                       mod_timer(&br->hello_timer,
+                                 round_jiffies(jiffies + br->hello_time));
        }
        spin_unlock(&br->lock);
 }
index 3cc71b9f551756ca63b1299e95d9b6424e5afb72..cc858919108ee1f9645bce1046be8650a640d821 100644 (file)
@@ -121,12 +121,13 @@ static void caif_flow_ctrl(struct sock *sk, int mode)
  * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
  * not dropped; CAIF sends flow off instead.
  */
-static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
        int err;
        unsigned long flags;
        struct sk_buff_head *list = &sk->sk_receive_queue;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+       bool queued = false;
 
        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
                (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
@@ -139,7 +140,8 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
        err = sk_filter(sk, skb);
        if (err)
-               return err;
+               goto out;
+
        if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
                set_rx_flow_off(cf_sk);
                net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
@@ -147,21 +149,16 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        }
        skb->dev = NULL;
        skb_set_owner_r(skb, sk);
-       /* Cache the SKB length before we tack it onto the receive
-        * queue. Once it is added it no longer belongs to us and
-        * may be freed by other threads of control pulling packets
-        * from the queue.
-        */
        spin_lock_irqsave(&list->lock, flags);
-       if (!sock_flag(sk, SOCK_DEAD))
+       queued = !sock_flag(sk, SOCK_DEAD);
+       if (queued)
                __skb_queue_tail(list, skb);
        spin_unlock_irqrestore(&list->lock, flags);
-
-       if (!sock_flag(sk, SOCK_DEAD))
+out:
+       if (queued)
                sk->sk_data_ready(sk);
        else
                kfree_skb(skb);
-       return 0;
 }
 
 /* Packet Receive Callback function called from CAIF Stack */
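
[editor's note] Beyond dropping the unused return value, the conversion above illustrates a common locking pattern: the decision to queue is recorded under the queue lock and acted on only after the lock is released, so sk_data_ready()/kfree_skb() never run with the spinlock held. A small user-space sketch of the same shape, with toy types and a pthread mutex standing in for the irq-safe spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct item { int v; };

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static bool sock_dead;          /* SOCK_DEAD stand-in */

static void queue_rcv(struct item *it)
{
        bool queued;

        pthread_mutex_lock(&queue_lock);
        queued = !sock_dead;    /* decide once, under the lock */
        if (queued) {
                /* __skb_queue_tail() equivalent would run here */
        }
        pthread_mutex_unlock(&queue_lock);

        if (queued)
                printf("data ready: %d\n", it->v);      /* sk_data_ready() */
        else
                free(it);                               /* kfree_skb() */
}

int main(void)
{
        struct item *it = malloc(sizeof(*it));

        if (!it)
                return 1;
        it->v = 42;
        queue_rcv(it);
        return 0;
}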
index fec0856dd6c031a2ae369410fc5d7f9c25a1fcf6..086b01fbe1bd846e61db3e5a7efc183abf2186b6 100644 (file)
@@ -23,3 +23,4 @@ obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += timestamping.o
 obj-$(CONFIG_NET_PTP_CLASSIFY) += ptp_classifier.o
 obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
 obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
+obj-$(CONFIG_LWTUNNEL) += lwtunnel.o
index b80fb91bb3f7e8dc630663cb5e012dc97ac6924f..4967262b27076af66347d20eca54b65a2e61d789 100644 (file)
@@ -131,6 +131,35 @@ out_noerr:
        goto out;
 }
 
+static int skb_set_peeked(struct sk_buff *skb)
+{
+       struct sk_buff *nskb;
+
+       if (skb->peeked)
+               return 0;
+
+       /* We have to unshare an skb before modifying it. */
+       if (!skb_shared(skb))
+               goto done;
+
+       nskb = skb_clone(skb, GFP_ATOMIC);
+       if (!nskb)
+               return -ENOMEM;
+
+       skb->prev->next = nskb;
+       skb->next->prev = nskb;
+       nskb->prev = skb->prev;
+       nskb->next = skb->next;
+
+       consume_skb(skb);
+       skb = nskb;
+
+done:
+       skb->peeked = 1;
+
+       return 0;
+}
+
 /**
  *     __skb_recv_datagram - Receive a datagram skbuff
  *     @sk: socket
@@ -165,7 +194,9 @@ out_noerr:
 struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
                                    int *peeked, int *off, int *err)
 {
+       struct sk_buff_head *queue = &sk->sk_receive_queue;
        struct sk_buff *skb, *last;
+       unsigned long cpu_flags;
        long timeo;
        /*
         * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
@@ -184,8 +215,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
                 * Look at current nfs client by the way...
                 * However, this function was correct in any case. 8)
                 */
-               unsigned long cpu_flags;
-               struct sk_buff_head *queue = &sk->sk_receive_queue;
                int _off = *off;
 
                last = (struct sk_buff *)queue;
@@ -199,7 +228,11 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
                                        _off -= skb->len;
                                        continue;
                                }
-                               skb->peeked = 1;
+
+                               error = skb_set_peeked(skb);
+                               if (error)
+                                       goto unlock_err;
+
                                atomic_inc(&skb->users);
                        } else
                                __skb_unlink(skb, queue);
@@ -223,6 +256,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
 
        return NULL;
 
+unlock_err:
+       spin_unlock_irqrestore(&queue->lock, cpu_flags);
 no_packet:
        *err = error;
        return NULL;
@@ -622,7 +657,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
                    !skb->csum_complete_sw)
                        netdev_rx_csum_fault(skb->dev);
        }
-       skb->csum_valid = !sum;
+       if (!skb_shared(skb))
+               skb->csum_valid = !sum;
        return sum;
 }
 EXPORT_SYMBOL(__skb_checksum_complete_head);
@@ -642,11 +678,13 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb)
                        netdev_rx_csum_fault(skb->dev);
        }
 
-       /* Save full packet checksum */
-       skb->csum = csum;
-       skb->ip_summed = CHECKSUM_COMPLETE;
-       skb->csum_complete_sw = 1;
-       skb->csum_valid = !sum;
+       if (!skb_shared(skb)) {
+               /* Save full packet checksum */
+               skb->csum = csum;
+               skb->ip_summed = CHECKSUM_COMPLETE;
+               skb->csum_complete_sw = 1;
+               skb->csum_valid = !sum;
+       }
 
        return sum;
 }
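
[editor's note] skb_set_peeked() above clones a shared buffer and splices the clone into the receive queue in the original's place before marking it peeked; the four pointer assignments are the heart of it. Here is the same splice on a toy circular doubly-linked list (all names invented, no kernel API):

#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *prev, *next;
        int peeked;
        int data;
};

static struct node *replace_with_clone(struct node *old)
{
        struct node *clone = malloc(sizeof(*clone));

        if (!clone)
                return NULL;
        *clone = *old;                  /* skb_clone() stand-in */

        old->prev->next = clone;        /* the same four updates as the patch */
        old->next->prev = clone;
        clone->prev = old->prev;
        clone->next = old->next;

        free(old);                      /* consume_skb() stand-in */
        clone->peeked = 1;
        return clone;
}

int main(void)
{
        struct node head = { &head, &head, 0, 0 };
        struct node *n = malloc(sizeof(*n));

        if (!n)
                return 1;
        n->data = 7;
        n->peeked = 0;
        n->prev = n->next = &head;      /* single-element queue */
        head.next = head.prev = n;

        n = replace_with_clone(n);
        printf("data=%d peeked=%d linked=%d\n",
               n->data, n->peeked, head.next == n && head.prev == n);
        free(n);
        return 0;
}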
index a8e4dd4302853702fef7fb1462fbdeb8a38f45e1..4870c3556a5a68be94cf28b65d527331810e7187 100644 (file)
@@ -3061,6 +3061,16 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
        else
                skb_dst_force(skb);
 
+#ifdef CONFIG_NET_SWITCHDEV
+       /* Don't forward if offload device already forwarded */
+       if (skb->offload_fwd_mark &&
+           skb->offload_fwd_mark == dev->offload_fwd_mark) {
+               consume_skb(skb);
+               rc = NET_XMIT_SUCCESS;
+               goto out;
+       }
+#endif
+
        txq = netdev_pick_tx(dev, skb, accel_priv);
        q = rcu_dereference_bh(txq->qdisc);
 
@@ -3645,7 +3655,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
 
        qdisc_skb_cb(skb)->pkt_len = skb->len;
        skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
-       qdisc_bstats_update_cpu(cl->q, skb);
+       qdisc_bstats_cpu_update(cl->q, skb);
 
        switch (tc_classify(skb, cl, &cl_res)) {
        case TC_ACT_OK:
@@ -3653,7 +3663,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
                skb->tc_index = TC_H_MIN(cl_res.classid);
                break;
        case TC_ACT_SHOT:
-               qdisc_qstats_drop_cpu(cl->q);
+               qdisc_qstats_cpu_drop(cl->q);
        case TC_ACT_STOLEN:
        case TC_ACT_QUEUED:
                kfree_skb(skb);
@@ -4985,7 +4995,7 @@ EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
  * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
  * RTNL lock or its own locking that guarantees that the neighbour lower
- * list will remain unchainged.
+ * list will remain unchanged.
  */
 void *netdev_lower_get_next_private(struct net_device *dev,
                                    struct list_head **iter)
@@ -5040,7 +5050,7 @@ EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
  * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must hold the RTNL lock or
  * its own locking that guarantees that the neighbour lower
- * list will remain unchainged.
+ * list will remain unchanged.
  */
 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
 {
@@ -6074,6 +6084,26 @@ int dev_get_phys_port_name(struct net_device *dev,
 }
 EXPORT_SYMBOL(dev_get_phys_port_name);
 
+/**
+ *     dev_change_proto_down - update protocol port state information
+ *     @dev: device
+ *     @proto_down: new value
+ *
+ *     This info can be used by switch drivers to set the phys state of the
+ *     port.
+ */
+int dev_change_proto_down(struct net_device *dev, bool proto_down)
+{
+       const struct net_device_ops *ops = dev->netdev_ops;
+
+       if (!ops->ndo_change_proto_down)
+               return -EOPNOTSUPP;
+       if (!netif_device_present(dev))
+               return -ENODEV;
+       return ops->ndo_change_proto_down(dev, proto_down);
+}
+EXPORT_SYMBOL(dev_change_proto_down);
+
 /**
  *     dev_new_index   -       allocate an ifindex
  *     @net: the applicable net namespace
@@ -7639,7 +7669,7 @@ static int __init net_dev_init(void)
        open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 
        hotcpu_notifier(dev_cpu_callback, 0);
-       dst_init();
+       dst_subsys_init();
        rc = 0;
 out:
        return rc;
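
[editor's note] dev_change_proto_down() above is a thin dispatcher: -EOPNOTSUPP when the driver provides no ndo_change_proto_down, -ENODEV when the device is gone, otherwise the driver op; the net-sysfs hunk further down also exposes the state via /sys/class/net/<dev>/proto_down. A user-space sketch of that optional-op dispatch pattern (toy_dev/toy_ops and the driver names are invented):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_dev;
struct toy_ops { int (*change_proto_down)(struct toy_dev *, bool); };
struct toy_dev { const struct toy_ops *ops; bool present, proto_down; };

static int toy_change_proto_down(struct toy_dev *dev, bool proto_down)
{
        if (!dev->ops->change_proto_down)
                return -EOPNOTSUPP;     /* driver opted out */
        if (!dev->present)
                return -ENODEV;
        return dev->ops->change_proto_down(dev, proto_down);
}

static int sw_change_proto_down(struct toy_dev *dev, bool proto_down)
{
        dev->proto_down = proto_down;   /* a real driver would touch hardware */
        return 0;
}

static const struct toy_ops sw_ops = {
        .change_proto_down = sw_change_proto_down,
};

int main(void)
{
        struct toy_dev dev = { .ops = &sw_ops, .present = true };

        printf("rc=%d proto_down=%d\n",
               toy_change_proto_down(&dev, true), dev.proto_down);
        return 0;
}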
index e956ce6d13782f2da0a229cabafef663665159eb..f8694d1b8702e70db45a0eee94b5361ecb4214e0 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/prefetch.h>
 
 #include <net/dst.h>
+#include <net/dst_metadata.h>
 
 /*
  * Theory of operations:
@@ -158,19 +159,10 @@ const u32 dst_default_metrics[RTAX_MAX + 1] = {
        [RTAX_MAX] = 0xdeadbeef,
 };
 
-
-void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
-               int initial_ref, int initial_obsolete, unsigned short flags)
+void dst_init(struct dst_entry *dst, struct dst_ops *ops,
+             struct net_device *dev, int initial_ref, int initial_obsolete,
+             unsigned short flags)
 {
-       struct dst_entry *dst;
-
-       if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
-               if (ops->gc(ops))
-                       return NULL;
-       }
-       dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
-       if (!dst)
-               return NULL;
        dst->child = NULL;
        dst->dev = dev;
        if (dev)
@@ -200,6 +192,25 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
        dst->next = NULL;
        if (!(flags & DST_NOCOUNT))
                dst_entries_add(ops, 1);
+}
+EXPORT_SYMBOL(dst_init);
+
+void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
+               int initial_ref, int initial_obsolete, unsigned short flags)
+{
+       struct dst_entry *dst;
+
+       if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
+               if (ops->gc(ops))
+                       return NULL;
+       }
+
+       dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
+       if (!dst)
+               return NULL;
+
+       dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);
+
        return dst;
 }
 EXPORT_SYMBOL(dst_alloc);
@@ -248,7 +259,11 @@ again:
                dst->ops->destroy(dst);
        if (dst->dev)
                dev_put(dst->dev);
-       kmem_cache_free(dst->ops->kmem_cachep, dst);
+
+       if (dst->flags & DST_METADATA)
+               kfree(dst);
+       else
+               kmem_cache_free(dst->ops->kmem_cachep, dst);
 
        dst = child;
        if (dst) {
@@ -284,7 +299,9 @@ void dst_release(struct dst_entry *dst)
                int newrefcnt;
 
                newrefcnt = atomic_dec_return(&dst->__refcnt);
-               WARN_ON(newrefcnt < 0);
+               if (unlikely(newrefcnt < 0))
+                       net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
+                                            __func__, dst, newrefcnt);
                if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
                        call_rcu(&dst->rcu_head, dst_destroy_rcu);
        }
@@ -327,6 +344,70 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
 }
 EXPORT_SYMBOL(__dst_destroy_metrics_generic);
 
+static struct dst_ops md_dst_ops = {
+       .family =               AF_UNSPEC,
+};
+
+static int dst_md_discard_sk(struct sock *sk, struct sk_buff *skb)
+{
+       WARN_ONCE(1, "Attempting to call output on metadata dst\n");
+       kfree_skb(skb);
+       return 0;
+}
+
+static int dst_md_discard(struct sk_buff *skb)
+{
+       WARN_ONCE(1, "Attempting to call input on metadata dst\n");
+       kfree_skb(skb);
+       return 0;
+}
+
+static void __metadata_dst_init(struct metadata_dst *md_dst, u8 optslen)
+{
+       struct dst_entry *dst;
+
+       dst = &md_dst->dst;
+       dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
+                DST_METADATA | DST_NOCACHE | DST_NOCOUNT);
+
+       dst->input = dst_md_discard;
+       dst->output = dst_md_discard_sk;
+
+       memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
+       md_dst->opts_len = optslen;
+}
+
+struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags)
+{
+       struct metadata_dst *md_dst;
+
+       md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
+       if (!md_dst)
+               return NULL;
+
+       __metadata_dst_init(md_dst, optslen);
+
+       return md_dst;
+}
+EXPORT_SYMBOL_GPL(metadata_dst_alloc);
+
+struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags)
+{
+       int cpu;
+       struct metadata_dst __percpu *md_dst;
+
+       md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
+                                   __alignof__(struct metadata_dst), flags);
+       if (!md_dst)
+               return NULL;
+
+       for_each_possible_cpu(cpu)
+               __metadata_dst_init(per_cpu_ptr(md_dst, cpu), optslen);
+
+       return md_dst;
+}
+EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);
+
 /* Dirty hack. We did it in 2.2 (in __dst_free),
  * we have _very_ good reasons not to repeat
  * this mistake in 2.3, but we have no choice
@@ -391,7 +472,7 @@ static struct notifier_block dst_dev_notifier = {
        .priority = -10, /* must be called after other network notifiers */
 };
 
-void __init dst_init(void)
+void __init dst_subsys_init(void)
 {
        register_netdevice_notifier(&dst_dev_notifier);
 }
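
[editor's note] The dst.c refactor separates initialisation (dst_init()) from slab allocation (dst_alloc()) so that metadata dsts can embed a struct dst_entry inside a larger kmalloc'd or per-cpu object and initialise it in place; dst_destroy() then frees through the matching allocator based on DST_METADATA. A compressed sketch of that embed-and-init shape, with invented struct names and flag value:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TOY_METADATA 0x4        /* stand-in for DST_METADATA */

struct core {
        int refcnt;
        unsigned short flags;
};

struct meta {                   /* struct metadata_dst stand-in */
        struct core core;
        int opts_len;
        char opts[];
};

static void core_init(struct core *c, unsigned short flags)
{
        c->refcnt = 1;
        c->flags = flags;
}

static struct core *core_alloc(unsigned short flags)    /* dst_alloc() shape */
{
        struct core *c = malloc(sizeof(*c));

        if (!c)
                return NULL;
        core_init(c, flags);
        return c;
}

static struct meta *meta_alloc(int opts_len)    /* metadata_dst_alloc() shape */
{
        struct meta *m = malloc(sizeof(*m) + opts_len);

        if (!m)
                return NULL;
        core_init(&m->core, TOY_METADATA);      /* init in place, no slab */
        memset(m->opts, 0, opts_len);
        m->opts_len = opts_len;
        return m;
}

static void core_free(struct core *c)           /* dst_destroy() shape */
{
        if (c->flags & TOY_METADATA)
                free((struct meta *)c);         /* kfree() of the embedding object */
        else
                free(c);                        /* kmem_cache_free() stand-in */
}

int main(void)
{
        struct core *c = core_alloc(0);
        struct meta *m = meta_alloc(16);

        if (!c || !m)
                return 1;
        printf("core flags=%#x, meta flags=%#x opts_len=%d\n",
               c->flags, m->core.flags, m->opts_len);
        core_free(c);
        core_free(&m->core);
        return 0;
}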
index 9a12668f7d62720c6ca18f09d13c45ea3e2ca2b2..ae8306e7c56f966196b570eb372a5321d146248a 100644 (file)
@@ -16,6 +16,7 @@
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <net/fib_rules.h>
+#include <net/ip_tunnels.h>
 
 int fib_default_rule_add(struct fib_rules_ops *ops,
                         u32 pref, u32 table, u32 flags)
@@ -186,6 +187,9 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
        if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
                goto out;
 
+       if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
+               goto out;
+
        ret = ops->match(rule, fl, flags);
 out:
        return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
@@ -330,6 +334,9 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
        if (tb[FRA_FWMASK])
                rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);
 
+       if (tb[FRA_TUN_ID])
+               rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);
+
        rule->action = frh->action;
        rule->flags = frh->flags;
        rule->table = frh_get_table(frh, tb);
@@ -407,6 +414,9 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
        if (unresolved)
                ops->unresolved_rules++;
 
+       if (rule->tun_id)
+               ip_tunnel_need_metadata();
+
        notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
        flush_route_cache(ops);
        rules_ops_put(ops);
@@ -473,6 +483,10 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
                    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
                        continue;
 
+               if (tb[FRA_TUN_ID] &&
+                   (rule->tun_id != nla_get_be64(tb[FRA_TUN_ID])))
+                       continue;
+
                if (!ops->compare(rule, frh, tb))
                        continue;
 
@@ -487,6 +501,9 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
                                goto errout;
                }
 
+               if (rule->tun_id)
+                       ip_tunnel_unneed_metadata();
+
                list_del_rcu(&rule->list);
 
                if (rule->action == FR_ACT_GOTO) {
@@ -535,7 +552,8 @@ static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
                         + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
                         + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
                         + nla_total_size(4) /* FRA_FWMARK */
-                        + nla_total_size(4); /* FRA_FWMASK */
+                        + nla_total_size(4) /* FRA_FWMASK */
+                        + nla_total_size(8); /* FRA_TUN_ID */
 
        if (ops->nlmsg_payload)
                payload += ops->nlmsg_payload(rule);
@@ -591,7 +609,9 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
            ((rule->mark_mask || rule->mark) &&
             nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
            (rule->target &&
-            nla_put_u32(skb, FRA_GOTO, rule->target)))
+            nla_put_u32(skb, FRA_GOTO, rule->target)) ||
+           (rule->tun_id &&
+            nla_put_be64(skb, FRA_TUN_ID, rule->tun_id)))
                goto nla_put_failure;
 
        if (rule->suppress_ifgroup != -1) {
index be3098fb65e45624e2e5a94b0f653d66e68293d9..a50dbfa83ad9c4459dde60bc07908170f607d3c9 100644 (file)
@@ -47,6 +47,8 @@
 #include <linux/if_vlan.h>
 #include <linux/bpf.h>
 #include <net/sch_generic.h>
+#include <net/cls_cgroup.h>
+#include <net/dst_metadata.h>
 
 /**
  *     sk_filter - run a packet through a socket filter
@@ -1424,6 +1426,136 @@ const struct bpf_func_proto bpf_clone_redirect_proto = {
        .arg3_type      = ARG_ANYTHING,
 };
 
+static u64 bpf_get_cgroup_classid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+       return task_get_classid((struct sk_buff *) (unsigned long) r1);
+}
+
+static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
+       .func           = bpf_get_cgroup_classid,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+};
+
+static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
+{
+       struct sk_buff *skb = (struct sk_buff *) (long) r1;
+       __be16 vlan_proto = (__force __be16) r2;
+
+       if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
+                    vlan_proto != htons(ETH_P_8021AD)))
+               vlan_proto = htons(ETH_P_8021Q);
+
+       return skb_vlan_push(skb, vlan_proto, vlan_tci);
+}
+
+const struct bpf_func_proto bpf_skb_vlan_push_proto = {
+       .func           = bpf_skb_vlan_push,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+       .arg3_type      = ARG_ANYTHING,
+};
+EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);
+
+static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+       struct sk_buff *skb = (struct sk_buff *) (long) r1;
+
+       return skb_vlan_pop(skb);
+}
+
+const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
+       .func           = bpf_skb_vlan_pop,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+};
+EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto);
+
+bool bpf_helper_changes_skb_data(void *func)
+{
+       if (func == bpf_skb_vlan_push)
+               return true;
+       if (func == bpf_skb_vlan_pop)
+               return true;
+       return false;
+}
+
+static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
+{
+       struct sk_buff *skb = (struct sk_buff *) (long) r1;
+       struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2;
+       struct ip_tunnel_info *info = skb_tunnel_info(skb, AF_INET);
+
+       if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags || !info))
+               return -EINVAL;
+
+       to->tunnel_id = be64_to_cpu(info->key.tun_id);
+       to->remote_ipv4 = be32_to_cpu(info->key.ipv4_src);
+
+       return 0;
+}
+
+const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
+       .func           = bpf_skb_get_tunnel_key,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_PTR_TO_STACK,
+       .arg3_type      = ARG_CONST_STACK_SIZE,
+       .arg4_type      = ARG_ANYTHING,
+};
+
+static struct metadata_dst __percpu *md_dst;
+
+static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
+{
+       struct sk_buff *skb = (struct sk_buff *) (long) r1;
+       struct bpf_tunnel_key *from = (struct bpf_tunnel_key *) (long) r2;
+       struct metadata_dst *md = this_cpu_ptr(md_dst);
+       struct ip_tunnel_info *info;
+
+       if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags))
+               return -EINVAL;
+
+       skb_dst_drop(skb);
+       dst_hold((struct dst_entry *) md);
+       skb_dst_set(skb, (struct dst_entry *) md);
+
+       info = &md->u.tun_info;
+       info->mode = IP_TUNNEL_INFO_TX;
+       info->key.tun_id = cpu_to_be64(from->tunnel_id);
+       info->key.ipv4_dst = cpu_to_be32(from->remote_ipv4);
+
+       return 0;
+}
+
+const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
+       .func           = bpf_skb_set_tunnel_key,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_PTR_TO_STACK,
+       .arg3_type      = ARG_CONST_STACK_SIZE,
+       .arg4_type      = ARG_ANYTHING,
+};
+
+static const struct bpf_func_proto *bpf_get_skb_set_tunnel_key_proto(void)
+{
+       if (!md_dst) {
+               /* A race is not possible, since this is called from the
+                * verifier, which holds the verifier mutex.
+                */
+               md_dst = metadata_dst_alloc_percpu(0, GFP_KERNEL);
+               if (!md_dst)
+                       return NULL;
+       }
+       return &bpf_skb_set_tunnel_key_proto;
+}
+
 static const struct bpf_func_proto *
 sk_filter_func_proto(enum bpf_func_id func_id)
 {
@@ -1461,6 +1593,16 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
                return &bpf_l4_csum_replace_proto;
        case BPF_FUNC_clone_redirect:
                return &bpf_clone_redirect_proto;
+       case BPF_FUNC_get_cgroup_classid:
+               return &bpf_get_cgroup_classid_proto;
+       case BPF_FUNC_skb_vlan_push:
+               return &bpf_skb_vlan_push_proto;
+       case BPF_FUNC_skb_vlan_pop:
+               return &bpf_skb_vlan_pop_proto;
+       case BPF_FUNC_skb_get_tunnel_key:
+               return &bpf_skb_get_tunnel_key_proto;
+       case BPF_FUNC_skb_set_tunnel_key:
+               return bpf_get_skb_set_tunnel_key_proto();
        default:
                return sk_filter_func_proto(func_id);
        }
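
[editor's note] For a sense of how the two tunnel-key helpers above are meant to be used from a tc classifier, here is a hedged sketch in the style of the samples/bpf programs of that era (compiled with clang -target bpf). The section name, the fixed VNI and the remote address are illustrative assumptions; struct bpf_tunnel_key at this point carries only tunnel_id and remote_ipv4, both in host byte order.

#include <linux/bpf.h>

static int (*get_tunnel_key)(void *ctx, struct bpf_tunnel_key *key,
                             int size, int flags) =
        (void *)BPF_FUNC_skb_get_tunnel_key;
static int (*set_tunnel_key)(void *ctx, struct bpf_tunnel_key *key,
                             int size, int flags) =
        (void *)BPF_FUNC_skb_set_tunnel_key;

__attribute__((section("classifier"), used))
int tunnel_rewrite(struct __sk_buff *skb)
{
        struct bpf_tunnel_key key = {};

        /* read the key of the tunnel the packet arrived on ... */
        if (get_tunnel_key(skb, &key, sizeof(key), 0) < 0)
                return 0;

        /* ... and retransmit it on VNI 42 toward a fixed remote */
        key.tunnel_id = 42;
        key.remote_ipv4 = 0x0a000001;   /* 10.0.0.1, host byte order */
        set_tunnel_key(skb, &key, sizeof(key), 0);
        return 0;
}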
@@ -1569,6 +1711,13 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
                                      offsetof(struct net_device, ifindex));
                break;
 
+       case offsetof(struct __sk_buff, hash):
+               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
+
+               *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+                                     offsetof(struct sk_buff, hash));
+               break;
+
        case offsetof(struct __sk_buff, mark):
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
 
index 2a834c6179b9973e45274d793e7d744939e5f49e..11e6540fa386918fe207b2e72e45b60d35ddb963 100644 (file)
@@ -590,6 +590,15 @@ void make_flow_keys_digest(struct flow_keys_digest *digest,
 }
 EXPORT_SYMBOL(make_flow_keys_digest);
 
+static inline void __skb_set_sw_hash(struct sk_buff *skb, u32 hash,
+                                    struct flow_keys *keys)
+{
+       if (keys->ports.ports)
+               skb->l4_hash = 1;
+       skb->sw_hash = 1;
+       skb->hash = hash;
+}
+
 /**
  * __skb_get_hash: calculate a flow hash
  * @skb: sk_buff to calculate flow hash from
@@ -609,10 +618,8 @@ void __skb_get_hash(struct sk_buff *skb)
        hash = ___skb_get_hash(skb, &keys, hashrnd);
        if (!hash)
                return;
-       if (keys.ports.ports)
-               skb->l4_hash = 1;
-       skb->sw_hash = 1;
-       skb->hash = hash;
+
+       __skb_set_sw_hash(skb, hash, &keys);
 }
 EXPORT_SYMBOL(__skb_get_hash);
 
@@ -624,6 +631,49 @@ __u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
 }
 EXPORT_SYMBOL(skb_get_hash_perturb);
 
+__u32 __skb_get_hash_flowi6(struct sk_buff *skb, struct flowi6 *fl6)
+{
+       struct flow_keys keys;
+
+       memset(&keys, 0, sizeof(keys));
+
+       memcpy(&keys.addrs.v6addrs.src, &fl6->saddr,
+              sizeof(keys.addrs.v6addrs.src));
+       memcpy(&keys.addrs.v6addrs.dst, &fl6->daddr,
+              sizeof(keys.addrs.v6addrs.dst));
+       keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+       keys.ports.src = fl6->fl6_sport;
+       keys.ports.dst = fl6->fl6_dport;
+       keys.keyid.keyid = fl6->fl6_gre_key;
+       keys.tags.flow_label = (__force u32)fl6->flowlabel;
+       keys.basic.ip_proto = fl6->flowi6_proto;
+
+       __skb_set_sw_hash(skb, flow_hash_from_keys(&keys), &keys);
+
+       return skb->hash;
+}
+EXPORT_SYMBOL(__skb_get_hash_flowi6);
+
+__u32 __skb_get_hash_flowi4(struct sk_buff *skb, struct flowi4 *fl4)
+{
+       struct flow_keys keys;
+
+       memset(&keys, 0, sizeof(keys));
+
+       keys.addrs.v4addrs.src = fl4->saddr;
+       keys.addrs.v4addrs.dst = fl4->daddr;
+       keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+       keys.ports.src = fl4->fl4_sport;
+       keys.ports.dst = fl4->fl4_dport;
+       keys.keyid.keyid = fl4->fl4_gre_key;
+       keys.basic.ip_proto = fl4->flowi4_proto;
+
+       __skb_set_sw_hash(skb, flow_hash_from_keys(&keys), &keys);
+
+       return skb->hash;
+}
+EXPORT_SYMBOL(__skb_get_hash_flowi4);
+
 u32 __skb_get_poff(const struct sk_buff *skb, void *data,
                   const struct flow_keys *keys, int hlen)
 {
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
new file mode 100644 (file)
index 0000000..5d6d8e3
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+ * lwtunnel    Infrastructure for lightweight tunnels like MPLS
+ *
+ * Authors:    Roopa Prabhu, <roopa@cumulusnetworks.com>
+ *
+ *             This program is free software; you can redistribute it and/or
+ *             modify it under the terms of the GNU General Public License
+ *             as published by the Free Software Foundation; either version
+ *             2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/lwtunnel.h>
+#include <linux/in.h>
+#include <linux/init.h>
+#include <linux/err.h>
+
+#include <net/lwtunnel.h>
+#include <net/rtnetlink.h>
+#include <net/ip6_fib.h>
+
+struct lwtunnel_state *lwtunnel_state_alloc(int encap_len)
+{
+       struct lwtunnel_state *lws;
+
+       lws = kzalloc(sizeof(*lws) + encap_len, GFP_ATOMIC);
+
+       return lws;
+}
+EXPORT_SYMBOL(lwtunnel_state_alloc);
+
+static const struct lwtunnel_encap_ops __rcu *
+               lwtun_encaps[LWTUNNEL_ENCAP_MAX + 1] __read_mostly;
+
+int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *ops,
+                          unsigned int num)
+{
+       if (num > LWTUNNEL_ENCAP_MAX)
+               return -ERANGE;
+
+       return !cmpxchg((const struct lwtunnel_encap_ops **)
+                       &lwtun_encaps[num],
+                       NULL, ops) ? 0 : -1;
+}
+EXPORT_SYMBOL(lwtunnel_encap_add_ops);
+
+int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *ops,
+                          unsigned int encap_type)
+{
+       int ret;
+
+       if (encap_type == LWTUNNEL_ENCAP_NONE ||
+           encap_type > LWTUNNEL_ENCAP_MAX)
+               return -ERANGE;
+
+       ret = (cmpxchg((const struct lwtunnel_encap_ops **)
+                      &lwtun_encaps[encap_type],
+                      ops, NULL) == ops) ? 0 : -1;
+
+       synchronize_net();
+
+       return ret;
+}
+EXPORT_SYMBOL(lwtunnel_encap_del_ops);
+
+int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+                        struct nlattr *encap, struct lwtunnel_state **lws)
+{
+       const struct lwtunnel_encap_ops *ops;
+       int ret = -EINVAL;
+
+       if (encap_type == LWTUNNEL_ENCAP_NONE ||
+           encap_type > LWTUNNEL_ENCAP_MAX)
+               return ret;
+
+       ret = -EOPNOTSUPP;
+       rcu_read_lock();
+       ops = rcu_dereference(lwtun_encaps[encap_type]);
+       if (likely(ops && ops->build_state))
+               ret = ops->build_state(dev, encap, lws);
+       rcu_read_unlock();
+
+       return ret;
+}
+EXPORT_SYMBOL(lwtunnel_build_state);
+
+int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate)
+{
+       const struct lwtunnel_encap_ops *ops;
+       struct nlattr *nest;
+       int ret = -EINVAL;
+
+       if (!lwtstate)
+               return 0;
+
+       if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+           lwtstate->type > LWTUNNEL_ENCAP_MAX)
+               return 0;
+
+       ret = -EOPNOTSUPP;
+       nest = nla_nest_start(skb, RTA_ENCAP);
+       rcu_read_lock();
+       ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+       if (likely(ops && ops->fill_encap))
+               ret = ops->fill_encap(skb, lwtstate);
+       rcu_read_unlock();
+
+       if (ret)
+               goto nla_put_failure;
+       nla_nest_end(skb, nest);
+       ret = nla_put_u16(skb, RTA_ENCAP_TYPE, lwtstate->type);
+       if (ret)
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, nest);
+
+       return (ret == -EOPNOTSUPP ? 0 : ret);
+}
+EXPORT_SYMBOL(lwtunnel_fill_encap);
+
+int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate)
+{
+       const struct lwtunnel_encap_ops *ops;
+       int ret = 0;
+
+       if (!lwtstate)
+               return 0;
+
+       if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+           lwtstate->type > LWTUNNEL_ENCAP_MAX)
+               return 0;
+
+       rcu_read_lock();
+       ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+       if (likely(ops && ops->get_encap_size))
+               ret = nla_total_size(ops->get_encap_size(lwtstate));
+       rcu_read_unlock();
+
+       return ret;
+}
+EXPORT_SYMBOL(lwtunnel_get_encap_size);
+
+int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+       const struct lwtunnel_encap_ops *ops;
+       int ret = 0;
+
+       if (!a && !b)
+               return 0;
+
+       if (!a || !b)
+               return 1;
+
+       if (a->type != b->type)
+               return 1;
+
+       if (a->type == LWTUNNEL_ENCAP_NONE ||
+           a->type > LWTUNNEL_ENCAP_MAX)
+               return 0;
+
+       rcu_read_lock();
+       ops = rcu_dereference(lwtun_encaps[a->type]);
+       if (likely(ops && ops->cmp_encap))
+               ret = ops->cmp_encap(a, b);
+       rcu_read_unlock();
+
+       return ret;
+}
+EXPORT_SYMBOL(lwtunnel_cmp_encap);
+
+int __lwtunnel_output(struct sock *sk, struct sk_buff *skb,
+                     struct lwtunnel_state *lwtstate)
+{
+       const struct lwtunnel_encap_ops *ops;
+       int ret = -EINVAL;
+
+       if (!lwtstate)
+               goto drop;
+
+       if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+           lwtstate->type > LWTUNNEL_ENCAP_MAX)
+               return 0;
+
+       ret = -EOPNOTSUPP;
+       rcu_read_lock();
+       ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+       if (likely(ops && ops->output))
+               ret = ops->output(sk, skb);
+       rcu_read_unlock();
+
+       if (ret == -EOPNOTSUPP)
+               goto drop;
+
+       return ret;
+
+drop:
+       kfree_skb(skb);
+
+       return ret;
+}
+
+int lwtunnel_output6(struct sock *sk, struct sk_buff *skb)
+{
+       struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+       struct lwtunnel_state *lwtstate = NULL;
+
+       if (rt) {
+               lwtstate = rt->rt6i_lwtstate;
+               skb->dev = rt->dst.dev;
+       }
+
+       skb->protocol = htons(ETH_P_IPV6);
+
+       return __lwtunnel_output(sk, skb, lwtstate);
+}
+EXPORT_SYMBOL(lwtunnel_output6);
+
+int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
+{
+       struct rtable *rt = (struct rtable *)skb_dst(skb);
+       struct lwtunnel_state *lwtstate = NULL;
+
+       if (rt) {
+               lwtstate = rt->rt_lwtstate;
+               skb->dev = rt->dst.dev;
+       }
+
+       skb->protocol = htons(ETH_P_IP);
+
+       return __lwtunnel_output(sk, skb, lwtstate);
+}
+EXPORT_SYMBOL(lwtunnel_output);
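
[editor's note] lwtunnel_encap_add_ops()/lwtunnel_encap_del_ops() above use cmpxchg() to claim and release a slot in lwtun_encaps[] without taking a lock: registration only succeeds on a NULL slot, and removal only succeeds for the current owner. A user-space sketch of the same slot discipline, with GCC's __sync builtins standing in for the kernel's cmpxchg() (all names invented):

#include <errno.h>
#include <stdio.h>

#define MAX_OPS 8

struct ops { const char *name; };

static struct ops *slots[MAX_OPS + 1];

static int add_ops(struct ops *ops, unsigned int num)
{
        if (num > MAX_OPS)
                return -ERANGE;
        /* claim the slot only if it is still empty */
        return __sync_bool_compare_and_swap(&slots[num], NULL, ops) ? 0 : -1;
}

static int del_ops(struct ops *ops, unsigned int num)
{
        if (num == 0 || num > MAX_OPS)
                return -ERANGE;
        /* release the slot only if we still own it */
        return __sync_bool_compare_and_swap(&slots[num], ops, NULL) ? 0 : -1;
}

int main(void)
{
        struct ops mpls = { "mpls" }, other = { "other" };

        printf("add mpls:  %d\n", add_ops(&mpls, 1));   /* 0             */
        printf("add other: %d\n", add_ops(&other, 1));  /* -1, slot taken */
        printf("del other: %d\n", del_ops(&other, 1));  /* -1, not owner  */
        printf("del mpls:  %d\n", del_ops(&mpls, 1));   /* 0             */
        return 0;
}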
index 18b34d771ed4dc7415a17cfdab83e56ee5683d02..194c1d03b2b3b1e78254fb0108682e4dfa3ab776 100644 (file)
@@ -404,6 +404,19 @@ static ssize_t group_store(struct device *dev, struct device_attribute *attr,
 NETDEVICE_SHOW(group, fmt_dec);
 static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);
 
+static int change_proto_down(struct net_device *dev, unsigned long proto_down)
+{
+       return dev_change_proto_down(dev, (bool) proto_down);
+}
+
+static ssize_t proto_down_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t len)
+{
+       return netdev_store(dev, attr, buf, len, change_proto_down);
+}
+NETDEVICE_SHOW_RW(proto_down, fmt_dec);
+
 static ssize_t phys_port_id_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
 {
@@ -501,6 +514,7 @@ static struct attribute *net_class_attrs[] = {
        &dev_attr_phys_port_id.attr,
        &dev_attr_phys_port_name.attr,
        &dev_attr_phys_switch_id.attr,
+       &dev_attr_proto_down.attr,
        NULL,
 };
 ATTRIBUTE_GROUPS(net_class);
index 1f2a126f4ffa07a6e50cd2b57a6042afca3ee3ad..6441f47b1a8ffc78731896fd4ab1b12db43f0992 100644 (file)
@@ -23,7 +23,8 @@ static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state
 
 struct cgroup_cls_state *task_cls_state(struct task_struct *p)
 {
-       return css_cls_state(task_css(p, net_cls_cgrp_id));
+       return css_cls_state(task_css_check(p, net_cls_cgrp_id,
+                                           rcu_read_lock_bh_held()));
 }
 EXPORT_SYMBOL_GPL(task_cls_state);
 
index 1ebdf1c0d1188c309d854bc9145c9b2f5b7b58a4..0e0fb30cbc04084c96d6ae8ceec94a2e493a3bbf 100644 (file)
@@ -273,7 +273,6 @@ struct pktgen_dev {
 
        /* runtime counters relating to clone_skb */
 
-       __u64 allocated_skbs;
        __u32 clone_count;
        int last_ok;            /* Was last skb sent?
                                 * Or a failed transmit of some sort?
@@ -2279,7 +2278,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
 {
-       pkt_dev->pkt_overhead = 0;
+       pkt_dev->pkt_overhead = LL_RESERVED_SPACE(pkt_dev->odev);
        pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
        pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
        pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
@@ -2788,6 +2787,7 @@ static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
        } else {
                 skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
        }
+       skb_reserve(skb, LL_RESERVED_SPACE(dev));
 
        return skb;
 }
@@ -3397,7 +3397,6 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
                        return;
                }
                pkt_dev->last_pkt_size = pkt_dev->skb->len;
-               pkt_dev->allocated_skbs++;
                pkt_dev->clone_count = 0;       /* reset counter */
        }
 
index 9e433d58d2651cf867294a911d0f136e565730ae..788ceed394636e4a3c3c38e4fbfe383dca8e48df 100644 (file)
@@ -896,7 +896,9 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
               + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
               + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
               + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
-              + nla_total_size(MAX_PHYS_ITEM_ID_LEN); /* IFLA_PHYS_SWITCH_ID */
+              + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
+              + nla_total_size(1); /* IFLA_PROTO_DOWN */
+
 }
 
 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
@@ -1082,7 +1084,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
            (dev->ifalias &&
             nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
            nla_put_u32(skb, IFLA_CARRIER_CHANGES,
-                       atomic_read(&dev->carrier_changes)))
+                       atomic_read(&dev->carrier_changes)) ||
+           nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
                goto nla_put_failure;
 
        if (1) {
@@ -1319,6 +1322,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
        [IFLA_CARRIER_CHANGES]  = { .type = NLA_U32 },  /* ignored */
        [IFLA_PHYS_SWITCH_ID]   = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
        [IFLA_LINK_NETNSID]     = { .type = NLA_S32 },
+       [IFLA_PROTO_DOWN]       = { .type = NLA_U8 },
 };
 
 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -1804,10 +1808,13 @@ static int do_setlink(const struct sk_buff *skb,
                        goto errout;
 
                nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
-                       if (nla_type(attr) != IFLA_VF_PORT)
-                               continue;
-                       err = nla_parse_nested(port, IFLA_PORT_MAX,
-                               attr, ifla_port_policy);
+                       if (nla_type(attr) != IFLA_VF_PORT ||
+                           nla_len(attr) < NLA_HDRLEN) {
+                               err = -EINVAL;
+                               goto errout;
+                       }
+                       err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
+                                              ifla_port_policy);
                        if (err < 0)
                                goto errout;
                        if (!port[IFLA_PORT_VF]) {
@@ -1858,6 +1865,14 @@ static int do_setlink(const struct sk_buff *skb,
        }
        err = 0;
 
+       if (tb[IFLA_PROTO_DOWN]) {
+               err = dev_change_proto_down(dev,
+                                           nla_get_u8(tb[IFLA_PROTO_DOWN]));
+               if (err)
+                       goto errout;
+               status |= DO_SETLINK_NOTIFY;
+       }
+
 errout:
        if (status & DO_SETLINK_MODIFIED) {
                if (status & DO_SETLINK_NOTIFY)
@@ -1948,16 +1963,30 @@ static int rtnl_group_dellink(const struct net *net, int group)
        return 0;
 }
 
+int rtnl_delete_link(struct net_device *dev)
+{
+       const struct rtnl_link_ops *ops;
+       LIST_HEAD(list_kill);
+
+       ops = dev->rtnl_link_ops;
+       if (!ops || !ops->dellink)
+               return -EOPNOTSUPP;
+
+       ops->dellink(dev, &list_kill);
+       unregister_netdevice_many(&list_kill);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtnl_delete_link);
+
 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
        struct net *net = sock_net(skb->sk);
-       const struct rtnl_link_ops *ops;
        struct net_device *dev;
        struct ifinfomsg *ifm;
        char ifname[IFNAMSIZ];
        struct nlattr *tb[IFLA_MAX+1];
        int err;
-       LIST_HEAD(list_kill);
 
        err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
        if (err < 0)
@@ -1979,13 +2008,7 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
        if (!dev)
                return -ENODEV;
 
-       ops = dev->rtnl_link_ops;
-       if (!ops || !ops->dellink)
-               return -EOPNOTSUPP;
-
-       ops->dellink(dev, &list_kill);
-       unregister_netdevice_many(&list_kill);
-       return 0;
+       return rtnl_delete_link(dev);
 }
 
 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
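
[editor's note] The IFLA_VF_PORTS loop change above tightens netlink validation: a nested attribute of the wrong type or shorter than a netlink header used to be skipped silently and now fails the whole request. The same before/after in miniature, with a toy attribute struct and invented constants:

#include <errno.h>
#include <stdio.h>

#define HDRLEN 4                /* NLA_HDRLEN stand-in */
#define TYPE_VF_PORT 1

struct attr { int type, len; };

static int parse_vf_ports(const struct attr *attrs, int n)
{
        for (int i = 0; i < n; i++) {
                if (attrs[i].type != TYPE_VF_PORT || attrs[i].len < HDRLEN)
                        return -EINVAL;         /* was: continue; */
                /* nla_parse_nested() equivalent would run here */
        }
        return 0;
}

int main(void)
{
        struct attr ok[]  = { { TYPE_VF_PORT, 8 } };
        struct attr bad[] = { { TYPE_VF_PORT, 2 } };    /* truncated */

        printf("ok:  %d\n", parse_vf_ports(ok, 1));
        printf("bad: %d\n", parse_vf_ports(bad, 1));
        return 0;
}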
index 08f16db46070a1520fcdd6892477093e9474af4f..193901d097577a88e7fea3f59450cc041942f8fa 100644 (file)
@@ -1497,7 +1497,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                sock_copy(newsk, sk);
 
                /* SANITY */
-               get_net(sock_net(newsk));
+               if (likely(newsk->sk_net_refcnt))
+                       get_net(sock_net(newsk));
                sk_node_init(&newsk->sk_node);
                sock_lock_init(newsk);
                bh_lock_sock(newsk);
@@ -1967,20 +1968,21 @@ static void __release_sock(struct sock *sk)
  * sk_wait_data - wait for data to arrive at sk_receive_queue
  * @sk:    sock to wait on
  * @timeo: for how long
+ * @skb:   last skb seen on sk_receive_queue
  *
  * Now socket state including sk->sk_err is changed only under lock,
  * hence we may omit checks after joining wait queue.
  * We check receive queue before schedule() only as optimization;
  * it is very likely that release_sock() added new data.
  */
-int sk_wait_data(struct sock *sk, long *timeo)
+int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
 {
        int rc;
        DEFINE_WAIT(wait);
 
        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
-       rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
+       rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
        clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        finish_wait(sk_sleep(sk), &wait);
        return rc;
index 43d3dd62fcc8eccd95a4618f68b0553cf7309c01..42689d5c468cb4f53baa058c74cdee58099137c7 100644 (file)
@@ -60,11 +60,15 @@ bool skb_defer_rx_timestamp(struct sk_buff *skb)
        struct phy_device *phydev;
        unsigned int type;
 
+       if (!skb->dev || !skb->dev->phydev || !skb->dev->phydev->drv)
+               return false;
+
        if (skb_headroom(skb) < ETH_HLEN)
                return false;
+
        __skb_push(skb, ETH_HLEN);
 
-       type = classify(skb);
+       type = ptp_classify_raw(skb);
 
        __skb_pull(skb, ETH_HLEN);
 
index 52a94016526d3c595a71f4affac1a24251f964d7..b5cf13a2800923486ad597c296a66145bc248596 100644 (file)
@@ -886,7 +886,7 @@ verify_sock_status:
                        break;
                }
 
-               sk_wait_data(sk, &timeo);
+               sk_wait_data(sk, &timeo, NULL);
                continue;
        found_ok_skb:
                if (len > skb->len)
index d5f1f9b862ea5f4794ba2fab277e19aed8e51d2a..311796c809afc2f3807c653ebc64b81ef9bb2152 100644 (file)
 
 #include <linux/phy.h>
 #include <linux/netdevice.h>
+#include <linux/netpoll.h>
 
 struct dsa_device_ops {
-       netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev);
+       struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
        int (*rcv)(struct sk_buff *skb, struct net_device *dev,
                   struct packet_type *pt, struct net_device *orig_dev);
 };
@@ -26,7 +27,7 @@ struct dsa_slave_priv {
         * switch port.
         */
        struct net_device       *dev;
-       netdev_tx_t             (*xmit)(struct sk_buff *skb,
+       struct sk_buff *        (*xmit)(struct sk_buff *skb,
                                        struct net_device *dev);
 
        /*
@@ -47,6 +48,9 @@ struct dsa_slave_priv {
        int                     old_duplex;
 
        struct net_device       *bridge_dev;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       struct netpoll          *netpoll;
+#endif
 };
 
 /* dsa.c */
index 0917123790eaf09b001c97a733039185fdb0a800..0010c690cc6715838c76da2d42c93ea9dcc113fc 100644 (file)
@@ -18,6 +18,7 @@
 #include <net/rtnetlink.h>
 #include <net/switchdev.h>
 #include <linux/if_bridge.h>
+#include <linux/netpoll.h>
 #include "dsa_priv.h"
 
 /* slave mii_bus handling ***************************************************/
@@ -418,24 +419,53 @@ static int dsa_slave_port_attr_get(struct net_device *dev,
        return 0;
 }
 
-static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
+static inline netdev_tx_t dsa_netpoll_send_skb(struct dsa_slave_priv *p,
+                                              struct sk_buff *skb)
 {
-       struct dsa_slave_priv *p = netdev_priv(dev);
-
-       return p->xmit(skb, dev);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       if (p->netpoll)
+               netpoll_send_skb(p->netpoll, skb);
+#else
+       BUG();
+#endif
+       return NETDEV_TX_OK;
 }
 
-static netdev_tx_t dsa_slave_notag_xmit(struct sk_buff *skb,
-                                       struct net_device *dev)
+static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
+       struct sk_buff *nskb;
 
-       skb->dev = p->parent->dst->master_netdev;
-       dev_queue_xmit(skb);
+       dev->stats.tx_packets++;
+       dev->stats.tx_bytes += skb->len;
+
+       /* Transmit function may have to reallocate the original SKB */
+       nskb = p->xmit(skb, dev);
+       if (!nskb)
+               return NETDEV_TX_OK;
+
+       /* An SKB for netpoll still needs to be mangled with the
+        * protocol-specific tag to be transmitted successfully.
+        */
+       if (unlikely(netpoll_tx_running(dev)))
+               return dsa_netpoll_send_skb(p, nskb);
+
+       /* Queue the SKB for transmission on the parent interface, but
+        * do not modify its EtherType
+        */
+       nskb->dev = p->parent->dst->master_netdev;
+       dev_queue_xmit(nskb);
 
        return NETDEV_TX_OK;
 }
 
+static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
+                                           struct net_device *dev)
+{
+       /* Just return the original SKB */
+       return skb;
+}
+
 
 /* ethtool operations *******************************************************/
 static int
@@ -665,6 +695,49 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
        return ret;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static int dsa_slave_netpoll_setup(struct net_device *dev,
+                                  struct netpoll_info *ni)
+{
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       struct dsa_switch *ds = p->parent;
+       struct net_device *master = ds->dst->master_netdev;
+       struct netpoll *netpoll;
+       int err = 0;
+
+       netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
+       if (!netpoll)
+               return -ENOMEM;
+
+       err = __netpoll_setup(netpoll, master);
+       if (err) {
+               kfree(netpoll);
+               goto out;
+       }
+
+       p->netpoll = netpoll;
+out:
+       return err;
+}
+
+static void dsa_slave_netpoll_cleanup(struct net_device *dev)
+{
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       struct netpoll *netpoll = p->netpoll;
+
+       if (!netpoll)
+               return;
+
+       p->netpoll = NULL;
+
+       __netpoll_free_async(netpoll);
+}
+
+static void dsa_slave_poll_controller(struct net_device *dev)
+{
+}
+#endif
+
 static const struct ethtool_ops dsa_slave_ethtool_ops = {
        .get_settings           = dsa_slave_get_settings,
        .set_settings           = dsa_slave_set_settings,
@@ -697,6 +770,11 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
        .ndo_fdb_dump           = dsa_slave_fdb_dump,
        .ndo_do_ioctl           = dsa_slave_ioctl,
        .ndo_get_iflink         = dsa_slave_get_iflink,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_netpoll_setup      = dsa_slave_netpoll_setup,
+       .ndo_netpoll_cleanup    = dsa_slave_netpoll_cleanup,
+       .ndo_poll_controller    = dsa_slave_poll_controller,
+#endif
 };
 
 static const struct switchdev_ops dsa_slave_switchdev_ops = {
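
[editor's note] The tagger rework running through slave.c and the tag_*.c files below changes the xmit contract from "transmit and return NETDEV_TX_OK" to "return the (possibly reallocated) skb, or NULL when it was consumed"; stats and queueing now live once in dsa_slave_xmit(), which also lets netpoll reuse the already-tagged skb. A sketch of that return-or-consume contract (toy packet type, names invented):

#include <stdio.h>
#include <stdlib.h>

struct pkt { int len; };

/* pass-through tagger, like dsa_slave_notag_xmit() */
static struct pkt *notag_xmit(struct pkt *p)
{
        return p;
}

/* reallocating tagger, like trailer_xmit() */
static struct pkt *tag_xmit(struct pkt *p)
{
        struct pkt *np = malloc(sizeof(*np));

        if (!np) {
                free(p);        /* consume on failure ... */
                return NULL;    /* ... and tell the caller so */
        }
        np->len = p->len + 4;   /* room for a 4-byte tag */
        free(p);
        return np;
}

static void slave_xmit(struct pkt *p, struct pkt *(*xmit)(struct pkt *))
{
        struct pkt *np = xmit(p);

        if (!np)                /* tagger already freed the packet */
                return;
        printf("queueing %d bytes on master\n", np->len);
        free(np);
}

int main(void)
{
        struct pkt *a = malloc(sizeof(*a));
        struct pkt *b = malloc(sizeof(*b));

        if (!a || !b)
                return 1;
        a->len = b->len = 60;
        slave_xmit(a, notag_xmit);
        slave_xmit(b, tag_xmit);
        return 0;
}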
index 83d3572cdb205934e3099c258244f347d7351517..e2aadb73111d544c1ce16db13bfd99c7506642fc 100644 (file)
 #define BRCM_EG_TC_MASK                0x7
 #define BRCM_EG_PID_MASK       0x1f
 
-static netdev_tx_t brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
        u8 *brcm_tag;
 
-       dev->stats.tx_packets++;
-       dev->stats.tx_bytes += skb->len;
-
        if (skb_cow_head(skb, BRCM_TAG_LEN) < 0)
                goto out_free;
 
@@ -87,17 +84,11 @@ static netdev_tx_t brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev)
                brcm_tag[2] = BRCM_IG_DSTMAP2_MASK;
        brcm_tag[3] = (1 << p->port) & BRCM_IG_DSTMAP1_MASK;
 
-       /* Queue the SKB for transmission on the parent interface, but
-        * do not modify its EtherType
-        */
-       skb->dev = p->parent->dst->master_netdev;
-       dev_queue_xmit(skb);
-
-       return NETDEV_TX_OK;
+       return skb;
 
 out_free:
        kfree_skb(skb);
-       return NETDEV_TX_OK;
+       return NULL;
 }
 
 static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
index 2dab27063273d2d48d12cc13b9d73fefe92c9362..aa780e4ac0bd9653618c2b4c582d882cfb52f94a 100644 (file)
 
 #define DSA_HLEN       4
 
-static netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
        u8 *dsa_header;
 
-       dev->stats.tx_packets++;
-       dev->stats.tx_bytes += skb->len;
-
        /*
         * Convert the outermost 802.1q tag to a DSA tag for tagged
         * packets, or insert a DSA tag between the addresses and
@@ -63,14 +60,11 @@ static netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev)
                dsa_header[3] = 0x00;
        }
 
-       skb->dev = p->parent->dst->master_netdev;
-       dev_queue_xmit(skb);
-
-       return NETDEV_TX_OK;
+       return skb;
 
 out_free:
        kfree_skb(skb);
-       return NETDEV_TX_OK;
+       return NULL;
 }
 
 static int dsa_rcv(struct sk_buff *skb, struct net_device *dev,
index 9aeda596f7ec4ec2c30df9167e87c7719535de91..2288c8098c42800c6477068c0334d7e1874ba608 100644 (file)
 #define DSA_HLEN       4
 #define EDSA_HLEN      8
 
-static netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
        u8 *edsa_header;
 
-       dev->stats.tx_packets++;
-       dev->stats.tx_bytes += skb->len;
-
        /*
         * Convert the outermost 802.1q tag to a DSA tag and prepend
         * a DSA ethertype field if the packet is tagged, or insert
@@ -76,14 +73,11 @@ static netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev)
                edsa_header[7] = 0x00;
        }
 
-       skb->dev = p->parent->dst->master_netdev;
-       dev_queue_xmit(skb);
-
-       return NETDEV_TX_OK;
+       return skb;
 
 out_free:
        kfree_skb(skb);
-       return NETDEV_TX_OK;
+       return NULL;
 }
 
 static int edsa_rcv(struct sk_buff *skb, struct net_device *dev,
index e268f9db8893deab7c2febd26ad0aae1849b157d..d25efc93d8f120739c83c3998e77d5e9dd3cfc45 100644 (file)
 #include <linux/slab.h>
 #include "dsa_priv.h"
 
-static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct sk_buff *nskb;
        int padlen;
        u8 *trailer;
 
-       dev->stats.tx_packets++;
-       dev->stats.tx_bytes += skb->len;
-
        /*
         * We have to make sure that the trailer ends up as the very
         * last 4 bytes of the packet.  This means that we have to pad
@@ -36,7 +33,7 @@ static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev)
        nskb = alloc_skb(NET_IP_ALIGN + skb->len + padlen + 4, GFP_ATOMIC);
        if (nskb == NULL) {
                kfree_skb(skb);
-               return NETDEV_TX_OK;
+               return NULL;
        }
        skb_reserve(nskb, NET_IP_ALIGN);
 
@@ -57,10 +54,7 @@ static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev)
        trailer[2] = 0x10;
        trailer[3] = 0x00;
 
-       nskb->dev = p->parent->dst->master_netdev;
-       dev_queue_xmit(nskb);
-
-       return NETDEV_TX_OK;
+       return nskb;
 }
 
 static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
index f46e4d1306f26363643c054f64d62a9acb39151e..214d44aef35b5cd69ffe859138938e311c717cc3 100644 (file)
@@ -207,7 +207,7 @@ found:
        } else {
                fq->q.meat += skb->len;
        }
-       add_frag_mem_limit(&fq->q, skb->truesize);
+       add_frag_mem_limit(fq->q.net, skb->truesize);
 
        if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
            fq->q.meat == fq->q.len) {
@@ -287,7 +287,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
                clone->data_len = clone->len;
                head->data_len -= clone->len;
                head->len -= clone->len;
-               add_frag_mem_limit(&fq->q, clone->truesize);
+               add_frag_mem_limit(fq->q.net, clone->truesize);
        }
 
        WARN_ON(head == NULL);
@@ -310,7 +310,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
                }
                fp = next;
        }
-       sub_frag_mem_limit(&fq->q, sum_truesize);
+       sub_frag_mem_limit(fq->q.net, sum_truesize);
 
        head->next = NULL;
        head->dev = dev;
index b2155a123f6c88980c180eeb7b4ffdcf68bea4fb..8d5960a37195136380032644b65a8b741a03ab00 100644 (file)
@@ -23,6 +23,26 @@ rdev_del_virtual_intf_deprecated(struct cfg802154_registered_device *rdev,
        rdev->ops->del_virtual_intf_deprecated(&rdev->wpan_phy, dev);
 }
 
+static inline int
+rdev_suspend(struct cfg802154_registered_device *rdev)
+{
+       int ret;
+       trace_802154_rdev_suspend(&rdev->wpan_phy);
+       ret = rdev->ops->suspend(&rdev->wpan_phy);
+       trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+       return ret;
+}
+
+static inline int
+rdev_resume(struct cfg802154_registered_device *rdev)
+{
+       int ret;
+       trace_802154_rdev_resume(&rdev->wpan_phy);
+       ret = rdev->ops->resume(&rdev->wpan_phy);
+       trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+       return ret;
+}
+
 static inline int
 rdev_add_virtual_intf(struct cfg802154_registered_device *rdev, char *name,
                      unsigned char name_assign_type,
index 133b4280660cfc2f9b651a56a95502991e21840b..bd88525b041e79c62a0458c499968315c5a56e4e 100644 (file)
  */
 
 #include <linux/device.h>
+#include <linux/rtnetlink.h>
 
 #include <net/cfg802154.h>
 
 #include "core.h"
 #include "sysfs.h"
+#include "rdev-ops.h"
 
 static inline struct cfg802154_registered_device *
 dev_to_rdev(struct device *dev)
@@ -62,10 +64,46 @@ static struct attribute *pmib_attrs[] = {
 };
 ATTRIBUTE_GROUPS(pmib);
 
+#ifdef CONFIG_PM_SLEEP
+static int wpan_phy_suspend(struct device *dev)
+{
+       struct cfg802154_registered_device *rdev = dev_to_rdev(dev);
+       int ret = 0;
+
+       if (rdev->ops->suspend) {
+               rtnl_lock();
+               ret = rdev_suspend(rdev);
+               rtnl_unlock();
+       }
+
+       return ret;
+}
+
+static int wpan_phy_resume(struct device *dev)
+{
+       struct cfg802154_registered_device *rdev = dev_to_rdev(dev);
+       int ret = 0;
+
+       if (rdev->ops->resume) {
+               rtnl_lock();
+               ret = rdev_resume(rdev);
+               rtnl_unlock();
+       }
+
+       return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(wpan_phy_pm_ops, wpan_phy_suspend, wpan_phy_resume);
+#define WPAN_PHY_PM_OPS (&wpan_phy_pm_ops)
+#else
+#define WPAN_PHY_PM_OPS NULL
+#endif
+
 struct class wpan_phy_class = {
        .name = "ieee802154",
        .dev_release = wpan_phy_release,
        .dev_groups = pmib_groups,
+       .pm = WPAN_PHY_PM_OPS,
 };
 
 int wpan_phy_sysfs_init(void)
index 9b5f0eb366969c0c968935389b97ad0893b41cf1..4399b7fbaa31481c402079680e3509ed05fb9479 100644 (file)
  *                     rdev->ops traces                     *
  *************************************************************/
 
+DECLARE_EVENT_CLASS(wpan_phy_only_evt,
+       TP_PROTO(struct wpan_phy *wpan_phy),
+       TP_ARGS(wpan_phy),
+       TP_STRUCT__entry(
+               WPAN_PHY_ENTRY
+       ),
+       TP_fast_assign(
+               WPAN_PHY_ASSIGN;
+       ),
+       TP_printk(WPAN_PHY_PR_FMT, WPAN_PHY_PR_ARG)
+);
+
+DEFINE_EVENT(wpan_phy_only_evt, 802154_rdev_suspend,
+       TP_PROTO(struct wpan_phy *wpan_phy),
+       TP_ARGS(wpan_phy)
+);
+
+DEFINE_EVENT(wpan_phy_only_evt, 802154_rdev_resume,
+       TP_PROTO(struct wpan_phy *wpan_phy),
+       TP_ARGS(wpan_phy)
+);
+
 TRACE_EVENT(802154_rdev_add_virtual_intf,
        TP_PROTO(struct wpan_phy *wpan_phy, char *name,
                 enum nl802154_iftype type, __le64 extended_addr),
index 9532ee87151f5d184205eafc70729fc543a9eb82..cc4e498a0ccf390115c2f6f0306248650f4a2c35 100644 (file)
 #include <net/raw.h>
 #include <net/icmp.h>
 #include <net/inet_common.h>
+#include <net/ip_tunnels.h>
 #include <net/xfrm.h>
 #include <net/net_namespace.h>
 #include <net/secure_seq.h>
@@ -1780,6 +1781,8 @@ static int __init inet_init(void)
 
        dev_add_pack(&ip_packet_type);
 
+       ip_tunnel_core_init();
+
        rc = 0;
 out:
        return rc;
index 933a92820d265e07b8c42300c7be6742565723b5..34a308573f4b0e7bcae0ad9ffa5e45b19c72c1d1 100644 (file)
@@ -291,6 +291,40 @@ static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb)
        kfree_skb(skb);
 }
 
+/* Create and send an arp packet. */
+static void arp_send_dst(int type, int ptype, __be32 dest_ip,
+                        struct net_device *dev, __be32 src_ip,
+                        const unsigned char *dest_hw,
+                        const unsigned char *src_hw,
+                        const unsigned char *target_hw, struct sk_buff *oskb)
+{
+       struct sk_buff *skb;
+
+       /* No ARP on this interface. */
+       if (dev->flags & IFF_NOARP)
+               return;
+
+       skb = arp_create(type, ptype, dest_ip, dev, src_ip,
+                        dest_hw, src_hw, target_hw);
+       if (!skb)
+               return;
+
+       if (oskb)
+               skb_dst_copy(skb, oskb);
+
+       arp_xmit(skb);
+}
+
+void arp_send(int type, int ptype, __be32 dest_ip,
+             struct net_device *dev, __be32 src_ip,
+             const unsigned char *dest_hw, const unsigned char *src_hw,
+             const unsigned char *target_hw)
+{
+       arp_send_dst(type, ptype, dest_ip, dev, src_ip, dest_hw, src_hw,
+                    target_hw, NULL);
+}
+EXPORT_SYMBOL(arp_send);
+
 static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
 {
        __be32 saddr = 0;
@@ -346,8 +380,9 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
                }
        }
 
-       arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
-                dst_hw, dev->dev_addr, NULL);
+       arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
+                    dst_hw, dev->dev_addr, NULL,
+                    dev->priv_flags & IFF_XMIT_DST_RELEASE ? NULL : skb);
 }
 
 static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
@@ -596,32 +631,6 @@ void arp_xmit(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(arp_xmit);
 
-/*
- *     Create and send an arp packet.
- */
-void arp_send(int type, int ptype, __be32 dest_ip,
-             struct net_device *dev, __be32 src_ip,
-             const unsigned char *dest_hw, const unsigned char *src_hw,
-             const unsigned char *target_hw)
-{
-       struct sk_buff *skb;
-
-       /*
-        *      No arp on this interface.
-        */
-
-       if (dev->flags&IFF_NOARP)
-               return;
-
-       skb = arp_create(type, ptype, dest_ip, dev, src_ip,
-                        dest_hw, src_hw, target_hw);
-       if (!skb)
-               return;
-
-       arp_xmit(skb);
-}
-EXPORT_SYMBOL(arp_send);
-
 /*
  *     Process an arp request.
  */
@@ -1017,14 +1026,16 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
 
        neigh = neigh_lookup(&arp_tbl, &ip, dev);
        if (neigh) {
-               read_lock_bh(&neigh->lock);
-               memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len);
-               r->arp_flags = arp_state_to_flags(neigh);
-               read_unlock_bh(&neigh->lock);
-               r->arp_ha.sa_family = dev->type;
-               strlcpy(r->arp_dev, dev->name, sizeof(r->arp_dev));
+               if (!(neigh->nud_state & NUD_NOARP)) {
+                       read_lock_bh(&neigh->lock);
+                       memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len);
+                       r->arp_flags = arp_state_to_flags(neigh);
+                       read_unlock_bh(&neigh->lock);
+                       r->arp_ha.sa_family = dev->type;
+                       strlcpy(r->arp_dev, dev->name, sizeof(r->arp_dev));
+                       err = 0;
+               }
                neigh_release(neigh);
-               err = 0;
        }
        return err;
 }
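
The arp.c refactor above moves packet creation into arp_send_dst(), which can inherit
the dst from the skb that triggered resolution, while the exported arp_send() shrinks
to the NULL-dst special case so its ABI is unchanged. A standalone sketch of that
extend-then-wrap pattern (illustrative types, not the kernel structures):

#include <stdio.h>
#include <stddef.h>

struct dst { int id; };

/* Extended sender: may reuse routing state from the packet that
 * triggered the ARP request, mirroring arp_send_dst().
 */
static void arp_send_dst(const char *target, const struct dst *dst)
{
	if (dst)
		printf("ARP who-has %s (reusing dst %d)\n", target, dst->id);
	else
		printf("ARP who-has %s (fresh route lookup)\n", target);
}

/* The original entry point stays source-compatible: it is now just
 * the no-dst case of the extended helper.
 */
static void arp_send(const char *target)
{
	arp_send_dst(target, NULL);
}

int main(void)
{
	struct dst d = { .id = 42 };

	arp_send("192.168.1.1");
	arp_send_dst("192.168.1.1", &d);
	return 0;
}
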
index 90c0e8386116177f4bbf412f2175aec93c64870c..f915abff1350a86af8d5bb89725b751c061b0fb5 100644 (file)
@@ -20,7 +20,7 @@
 #include <net/route.h>
 #include <net/tcp_states.h>
 
-int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
        struct inet_sock *inet = inet_sk(sk);
        struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
@@ -39,8 +39,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 
        sk_dst_reset(sk);
 
-       lock_sock(sk);
-
        oif = sk->sk_bound_dev_if;
        saddr = inet->inet_saddr;
        if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
@@ -76,15 +74,25 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        inet->inet_daddr = fl4->daddr;
        inet->inet_dport = usin->sin_port;
        sk->sk_state = TCP_ESTABLISHED;
-       inet_set_txhash(sk);
+       sk_set_txhash(sk);
        inet->inet_id = jiffies;
 
        sk_dst_set(sk, &rt->dst);
        err = 0;
 out:
-       release_sock(sk);
        return err;
 }
+EXPORT_SYMBOL(__ip4_datagram_connect);
+
+int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+       int res;
+
+       lock_sock(sk);
+       res = __ip4_datagram_connect(sk, uaddr, addr_len);
+       release_sock(sk);
+       return res;
+}
 EXPORT_SYMBOL(ip4_datagram_connect);
 
 /* Because UDP xmit path can manipulate sk_dst_cache without holding
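
The datagram.c change splits the connect path into a lockless __ip4_datagram_connect()
core plus a thin wrapper that takes the socket lock, so callers that already hold the
lock can reuse the core. A runnable sketch of the same split, with a pthread mutex as
a stand-in for lock_sock()/release_sock() (build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;
static int connected_addr;

/* Lockless core: the caller must already hold sock_lock, just as
 * __ip4_datagram_connect() assumes lock_sock() was done.
 */
static int __datagram_connect(int addr)
{
	connected_addr = addr;
	return 0;
}

/* Public entry point keeps the old locking behaviour. */
static int datagram_connect(int addr)
{
	int res;

	pthread_mutex_lock(&sock_lock);
	res = __datagram_connect(addr);
	pthread_mutex_unlock(&sock_lock);
	return res;
}

int main(void)
{
	printf("connect: %d (addr %d)\n", datagram_connect(7), connected_addr);
	return 0;
}
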
index e813196c91c76a66cc2eb272064d484734bd7497..2d9cb1748f8191c785567632faf0ee14eaca628b 100644 (file)
@@ -882,7 +882,6 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
                queue_delayed_work(system_power_efficient_wq,
                                &check_lifetime_work, 0);
                rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
-               blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
        }
        return 0;
 }
index 6bbc54940eb4ee4802a5a94835e44b0d22b170dd..6b98de0d79498d575a44d6c20bd3abb5a38ea75c 100644 (file)
@@ -280,6 +280,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
                fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
                fl4.flowi4_scope = scope;
                fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
+               fl4.flowi4_tun_key.tun_id = 0;
                if (!fib_lookup(net, &fl4, &res, 0))
                        return FIB_RES_PREFSRC(net, res);
        } else {
@@ -313,6 +314,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
        fl4.saddr = dst;
        fl4.flowi4_tos = tos;
        fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+       fl4.flowi4_tun_key.tun_id = 0;
 
        no_addr = idev->ifa_list == NULL;
 
@@ -591,6 +593,8 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
        [RTA_METRICS]           = { .type = NLA_NESTED },
        [RTA_MULTIPATH]         = { .len = sizeof(struct rtnexthop) },
        [RTA_FLOW]              = { .type = NLA_U32 },
+       [RTA_ENCAP_TYPE]        = { .type = NLA_U16 },
+       [RTA_ENCAP]             = { .type = NLA_NESTED },
 };
 
 static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
@@ -656,6 +660,12 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
                case RTA_TABLE:
                        cfg->fc_table = nla_get_u32(attr);
                        break;
+               case RTA_ENCAP:
+                       cfg->fc_encap = attr;
+                       break;
+               case RTA_ENCAP_TYPE:
+                       cfg->fc_encap_type = nla_get_u16(attr);
+                       break;
                }
        }
 
index c6211ed60b03be1940a1954c08adb8a265f4e124..9c02920725dbea00d2a4f84877e0769ff74cfe0c 100644 (file)
@@ -13,6 +13,7 @@ struct fib_alias {
        u8                      fa_state;
        u8                      fa_slen;
        u32                     tb_id;
+       s16                     fa_default;
        struct rcu_head         rcu;
 };
 
index c7358ea4ae93530a7f6ef110a2dc204f19ac830e..558e196bae0f5a10a6bc81102c657db00b726ad1 100644 (file)
@@ -42,6 +42,7 @@
 #include <net/ip_fib.h>
 #include <net/netlink.h>
 #include <net/nexthop.h>
+#include <net/lwtunnel.h>
 
 #include "fib_lookup.h"
 
@@ -208,6 +209,7 @@ static void free_fib_info_rcu(struct rcu_head *head)
        change_nexthops(fi) {
                if (nexthop_nh->nh_dev)
                        dev_put(nexthop_nh->nh_dev);
+               lwtstate_put(nexthop_nh->nh_lwtstate);
                free_nh_exceptions(nexthop_nh);
                rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
                rt_fibinfo_free(&nexthop_nh->nh_rth_input);
@@ -266,6 +268,7 @@ static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
 #ifdef CONFIG_IP_ROUTE_CLASSID
                    nh->nh_tclassid != onh->nh_tclassid ||
 #endif
+                   lwtunnel_cmp_encap(nh->nh_lwtstate, onh->nh_lwtstate) ||
                    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_COMPARE_MASK))
                        return -1;
                onh++;
@@ -366,6 +369,7 @@ static inline size_t fib_nlmsg_size(struct fib_info *fi)
        payload += nla_total_size((RTAX_MAX * nla_total_size(4)));
 
        if (fi->fib_nhs) {
+               size_t nh_encapsize = 0;
                /* Also handles the special case fib_nhs == 1 */
 
                /* each nexthop is packed in an attribute */
@@ -374,8 +378,21 @@ static inline size_t fib_nlmsg_size(struct fib_info *fi)
                /* may contain flow and gateway attribute */
                nhsize += 2 * nla_total_size(4);
 
+               /* grab encap info */
+               for_nexthops(fi) {
+                       if (nh->nh_lwtstate) {
+                               /* RTA_ENCAP_TYPE */
+                               nh_encapsize += lwtunnel_get_encap_size(
+                                               nh->nh_lwtstate);
+                               /* RTA_ENCAP */
+                               nh_encapsize += nla_total_size(2);
+                       }
+               } endfor_nexthops(fi);
+
                /* all nexthops are packed in a nested attribute */
-               payload += nla_total_size(fi->fib_nhs * nhsize);
+               payload += nla_total_size((fi->fib_nhs * nhsize) +
+                                         nh_encapsize);
+
        }
 
        return payload;
@@ -421,13 +438,15 @@ static int fib_detect_death(struct fib_info *fi, int order,
        if (n) {
                state = n->nud_state;
                neigh_release(n);
+       } else {
+               return 0;
        }
        if (state == NUD_REACHABLE)
                return 0;
        if ((state & NUD_VALID) && order != dflt)
                return 0;
        if ((state & NUD_VALID) ||
-           (*last_idx < 0 && order > dflt)) {
+           (*last_idx < 0 && order > dflt && state != NUD_INCOMPLETE)) {
                *last_resort = fi;
                *last_idx = order;
        }
@@ -452,6 +471,9 @@ static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
 static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
                       int remaining, struct fib_config *cfg)
 {
+       struct net *net = cfg->fc_nlinfo.nl_net;
+       int ret;
+
        change_nexthops(fi) {
                int attrlen;
 
@@ -475,18 +497,66 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
                        if (nexthop_nh->nh_tclassid)
                                fi->fib_net->ipv4.fib_num_tclassid_users++;
 #endif
+                       nla = nla_find(attrs, attrlen, RTA_ENCAP);
+                       if (nla) {
+                               struct lwtunnel_state *lwtstate;
+                               struct net_device *dev = NULL;
+                               struct nlattr *nla_entype;
+
+                               nla_entype = nla_find(attrs, attrlen,
+                                                     RTA_ENCAP_TYPE);
+                               if (!nla_entype)
+                                       goto err_inval;
+                               if (cfg->fc_oif)
+                                       dev = __dev_get_by_index(net, cfg->fc_oif);
+                               ret = lwtunnel_build_state(dev,
+                                                          nla_get_u16(nla_entype),
+                                                          nla, &lwtstate);
+                               if (ret)
+                                       goto errout;
+                               nexthop_nh->nh_lwtstate =
+                                       lwtstate_get(lwtstate);
+                       }
                }
 
                rtnh = rtnh_next(rtnh, &remaining);
        } endfor_nexthops(fi);
 
        return 0;
+
+err_inval:
+       ret = -EINVAL;
+
+errout:
+       return ret;
 }
 
 #endif
 
+int fib_encap_match(struct net *net, u16 encap_type,
+                   struct nlattr *encap,
+                   int oif, const struct fib_nh *nh)
+{
+       struct lwtunnel_state *lwtstate;
+       struct net_device *dev = NULL;
+       int ret;
+
+       if (encap_type == LWTUNNEL_ENCAP_NONE)
+               return 0;
+
+       if (oif)
+               dev = __dev_get_by_index(net, oif);
+       ret = lwtunnel_build_state(dev, encap_type,
+                                  encap, &lwtstate);
+       if (!ret)
+               return lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
+
+       return 0;
+}
+
 int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
 {
+       struct net *net = cfg->fc_nlinfo.nl_net;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        struct rtnexthop *rtnh;
        int remaining;
@@ -496,6 +566,12 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
                return 1;
 
        if (cfg->fc_oif || cfg->fc_gw) {
+               if (cfg->fc_encap) {
+                       if (fib_encap_match(net, cfg->fc_encap_type,
+                                           cfg->fc_encap, cfg->fc_oif,
+                                           fi->fib_nh))
+                               return 1;
+               }
                if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
                    (!cfg->fc_gw  || cfg->fc_gw == fi->fib_nh->nh_gw))
                        return 0;
@@ -882,6 +958,21 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
        } else {
                struct fib_nh *nh = fi->fib_nh;
 
+               if (cfg->fc_encap) {
+                       struct lwtunnel_state *lwtstate;
+                       struct net_device *dev = NULL;
+
+                       if (cfg->fc_encap_type == LWTUNNEL_ENCAP_NONE)
+                               goto err_inval;
+                       if (cfg->fc_oif)
+                               dev = __dev_get_by_index(net, cfg->fc_oif);
+                       err = lwtunnel_build_state(dev, cfg->fc_encap_type,
+                                                  cfg->fc_encap, &lwtstate);
+                       if (err)
+                               goto failure;
+
+                       nh->nh_lwtstate = lwtstate_get(lwtstate);
+               }
                nh->nh_oif = cfg->fc_oif;
                nh->nh_gw = cfg->fc_gw;
                nh->nh_flags = cfg->fc_flags;
@@ -1055,6 +1146,8 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
                    nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
                        goto nla_put_failure;
 #endif
+               if (fi->fib_nh->nh_lwtstate)
+                       lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate);
        }
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        if (fi->fib_nhs > 1) {
@@ -1090,6 +1183,8 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
                            nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
                                goto nla_put_failure;
 #endif
+                       if (nh->nh_lwtstate)
+                               lwtunnel_fill_encap(skb, nh->nh_lwtstate);
                        /* length of rtnetlink header + attributes */
                        rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
                } endfor_nexthops(fi);
@@ -1202,23 +1297,40 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event)
 }
 
 /* Must be invoked inside of an RCU protected region.  */
-void fib_select_default(struct fib_result *res)
+void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
 {
        struct fib_info *fi = NULL, *last_resort = NULL;
        struct hlist_head *fa_head = res->fa_head;
        struct fib_table *tb = res->table;
+       u8 slen = 32 - res->prefixlen;
        int order = -1, last_idx = -1;
-       struct fib_alias *fa;
+       struct fib_alias *fa, *fa1 = NULL;
+       u32 last_prio = res->fi->fib_priority;
+       u8 last_tos = 0;
 
        hlist_for_each_entry_rcu(fa, fa_head, fa_list) {
                struct fib_info *next_fi = fa->fa_info;
 
+               if (fa->fa_slen != slen)
+                       continue;
+               if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
+                       continue;
+               if (fa->tb_id != tb->tb_id)
+                       continue;
+               if (next_fi->fib_priority > last_prio &&
+                   fa->fa_tos == last_tos) {
+                       if (last_tos)
+                               continue;
+                       break;
+               }
+               if (next_fi->fib_flags & RTNH_F_DEAD)
+                       continue;
+               last_tos = fa->fa_tos;
+               last_prio = next_fi->fib_priority;
+
                if (next_fi->fib_scope != res->scope ||
                    fa->fa_type != RTN_UNICAST)
                        continue;
-
-               if (next_fi->fib_priority > res->fi->fib_priority)
-                       break;
                if (!next_fi->fib_nh[0].nh_gw ||
                    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
                        continue;
@@ -1228,10 +1340,11 @@ void fib_select_default(struct fib_result *res)
                if (!fi) {
                        if (next_fi != res->fi)
                                break;
+                       fa1 = fa;
                } else if (!fib_detect_death(fi, order, &last_resort,
-                                            &last_idx, tb->tb_default)) {
+                                            &last_idx, fa1->fa_default)) {
                        fib_result_assign(res, fi);
-                       tb->tb_default = order;
+                       fa1->fa_default = order;
                        goto out;
                }
                fi = next_fi;
@@ -1239,20 +1352,21 @@ void fib_select_default(struct fib_result *res)
        }
 
        if (order <= 0 || !fi) {
-               tb->tb_default = -1;
+               if (fa1)
+                       fa1->fa_default = -1;
                goto out;
        }
 
        if (!fib_detect_death(fi, order, &last_resort, &last_idx,
-                               tb->tb_default)) {
+                             fa1->fa_default)) {
                fib_result_assign(res, fi);
-               tb->tb_default = order;
+               fa1->fa_default = order;
                goto out;
        }
 
        if (last_idx >= 0)
                fib_result_assign(res, last_resort);
-       tb->tb_default = last_idx;
+       fa1->fa_default = last_idx;
 out:
        return;
 }
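
fib_select_default() now only lets aliases of the same prefix length and TOS class in
this table compete, stops scanning once priority worsens, and skips dead nexthops. A
deliberately simplified, runnable model of that filtering (it omits the fa_default and
fib_detect_death() bookkeeping; field names and the initial priority are illustrative):

#include <stdio.h>

struct alias {
	unsigned char slen;	/* suffix length: must match the result's */
	unsigned char tos;	/* 0 = wildcard */
	unsigned int prio;	/* lower value is preferred */
	int dead;
	const char *gw;
};

static const char *select_default(const struct alias *fa, int n,
				  unsigned char slen, unsigned char tos)
{
	unsigned int last_prio = 0;	/* assumed priority of current result */
	const char *pick = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (fa[i].slen != slen)
			continue;		/* different prefix class */
		if (fa[i].tos && fa[i].tos != tos)
			continue;		/* TOS does not match */
		if (fa[i].prio > last_prio)
			break;			/* worse priority: stop */
		if (fa[i].dead)
			continue;		/* dead nexthop: skip */
		last_prio = fa[i].prio;
		if (!pick)
			pick = fa[i].gw;
	}
	return pick;
}

int main(void)
{
	static const struct alias fa[] = {
		{ 32, 0, 0, 1, "10.0.0.1" },	/* dead, skipped */
		{ 32, 0, 0, 0, "10.0.0.2" },	/* picked */
		{ 32, 0, 1, 0, "10.0.0.3" },	/* worse priority, break */
	};

	printf("default via %s\n", select_default(fa, 3, 32, 0));
	return 0;
}
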
index 15d32612e3c6f134a533034aa4d7f74fa52da51b..37c4bb89a7082bbe36b40d928f7fd1d95bfe8252 100644 (file)
@@ -1171,6 +1171,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
                        new_fa->fa_state = state & ~FA_S_ACCESSED;
                        new_fa->fa_slen = fa->fa_slen;
                        new_fa->tb_id = tb->tb_id;
+                       new_fa->fa_default = -1;
 
                        err = switchdev_fib_ipv4_add(key, plen, fi,
                                                     new_fa->fa_tos,
@@ -1222,6 +1223,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
        new_fa->fa_state = 0;
        new_fa->fa_slen = slen;
        new_fa->tb_id = tb->tb_id;
+       new_fa->fa_default = -1;
 
        /* (Optionally) offload fib entry to switch hardware. */
        err = switchdev_fib_ipv4_add(key, plen, fi, tos, cfg->fc_type,
@@ -1791,8 +1793,6 @@ void fib_table_flush_external(struct fib_table *tb)
                if (hlist_empty(&n->leaf)) {
                        put_child_root(pn, n->key, NULL);
                        node_free(n);
-               } else {
-                       leaf_pull_suffix(pn, n);
                }
        }
 }
@@ -1862,8 +1862,6 @@ int fib_table_flush(struct fib_table *tb)
                if (hlist_empty(&n->leaf)) {
                        put_child_root(pn, n->key, NULL);
                        node_free(n);
-               } else {
-                       leaf_pull_suffix(pn, n);
                }
        }
 
@@ -1990,7 +1988,6 @@ struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
                return NULL;
 
        tb->tb_id = id;
-       tb->tb_default = -1;
        tb->tb_num_default = 0;
        tb->tb_data = (alias ? alias->__data : tb->__data);
 
index f5203fba623638d94b03435db86ac4ed696adba8..c0556f1e4bf09233970c8d5c3fd68afa9a78489f 100644 (file)
@@ -496,6 +496,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
                }
                /* Ugh! */
                orefdst = skb_in->_skb_refdst; /* save old refdst */
+               skb_dst_set(skb_in, NULL);
                err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
                                     RT_TOS(tos), rt2->dst.dev);
 
index 5e346a082e5ff05b58cfebb64917ee26001d809d..d0a7c0319e3d1b1b73f828717062b6fbbd3be27d 100644 (file)
@@ -131,34 +131,22 @@ inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
        unsigned int evicted = 0;
        HLIST_HEAD(expired);
 
-evict_again:
        spin_lock(&hb->chain_lock);
 
        hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
                if (!inet_fragq_should_evict(fq))
                        continue;
 
-               if (!del_timer(&fq->timer)) {
-                       /* q expiring right now thus increment its refcount so
-                        * it won't be freed under us and wait until the timer
-                        * has finished executing then destroy it
-                        */
-                       atomic_inc(&fq->refcnt);
-                       spin_unlock(&hb->chain_lock);
-                       del_timer_sync(&fq->timer);
-                       inet_frag_put(fq, f);
-                       goto evict_again;
-               }
+               if (!del_timer(&fq->timer))
+                       continue;
 
-               fq->flags |= INET_FRAG_EVICTED;
-               hlist_del(&fq->list);
-               hlist_add_head(&fq->list, &expired);
+               hlist_add_head(&fq->list_evictor, &expired);
                ++evicted;
        }
 
        spin_unlock(&hb->chain_lock);
 
-       hlist_for_each_entry_safe(fq, n, &expired, list)
+       hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
                f->frag_expire((unsigned long) fq);
 
        return evicted;
@@ -240,18 +228,20 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
        int i;
 
        nf->low_thresh = 0;
-       local_bh_disable();
 
 evict_again:
+       local_bh_disable();
        seq = read_seqbegin(&f->rnd_seqlock);
 
        for (i = 0; i < INETFRAGS_HASHSZ ; i++)
                inet_evict_bucket(f, &f->hash[i]);
 
-       if (read_seqretry(&f->rnd_seqlock, seq))
-               goto evict_again;
-
        local_bh_enable();
+       cond_resched();
+
+       if (read_seqretry(&f->rnd_seqlock, seq) ||
+           percpu_counter_sum(&nf->mem))
+               goto evict_again;
 
        percpu_counter_destroy(&nf->mem);
 }
@@ -284,8 +274,8 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
        struct inet_frag_bucket *hb;
 
        hb = get_frag_bucket_locked(fq, f);
-       if (!(fq->flags & INET_FRAG_EVICTED))
-               hlist_del(&fq->list);
+       hlist_del(&fq->list);
+       fq->flags |= INET_FRAG_COMPLETE;
        spin_unlock(&hb->chain_lock);
 }
 
@@ -297,7 +287,6 @@ void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
        if (!(fq->flags & INET_FRAG_COMPLETE)) {
                fq_unlink(fq, f);
                atomic_dec(&fq->refcnt);
-               fq->flags |= INET_FRAG_COMPLETE;
        }
 }
 EXPORT_SYMBOL(inet_frag_kill);
@@ -330,11 +319,12 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
                fp = xp;
        }
        sum = sum_truesize + f->qsize;
-       sub_frag_mem_limit(q, sum);
 
        if (f->destructor)
                f->destructor(q);
        kmem_cache_free(f->frags_cachep, q);
+
+       sub_frag_mem_limit(nf, sum);
 }
 EXPORT_SYMBOL(inet_frag_destroy);
 
@@ -390,7 +380,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
 
        q->net = nf;
        f->constructor(q, arg);
-       add_frag_mem_limit(q, f->qsize);
+       add_frag_mem_limit(nf, f->qsize);
 
        setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
        spin_lock_init(&q->lock);
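
The eviction loop above is simplified around one ownership rule: whoever successfully
cancels the pending timer owns the queue. If del_timer() fails, the queue is already
expiring and its timer handler will clean it up, so the evictor just skips it instead
of the old unlock/del_timer_sync()/retry cycle. A toy model of that cancel-or-skip
rule (plain ints stand in for the timer and lock machinery):

#include <stdio.h>

struct fragq {
	int timer_pending;
	const char *name;
};

/* del_timer() semantics: returns nonzero only if we deactivated a
 * still-pending timer, i.e. we won the race against expiry.
 */
static int try_cancel_timer(struct fragq *q)
{
	int was_pending = q->timer_pending;

	q->timer_pending = 0;
	return was_pending;
}

int main(void)
{
	struct fragq a = { 1, "q1" }, b = { 0, "q2 (already expiring)" };
	struct fragq *all[] = { &a, &b };
	int i;

	for (i = 0; i < 2; i++) {
		if (!try_cancel_timer(all[i])) {
			printf("%s: timer firing, skip\n", all[i]->name);
			continue;	/* the timer handler frees it */
		}
		printf("%s: evicted\n", all[i]->name);
	}
	return 0;
}
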
index 5f9b063bbe8ab4f3755a5711ae19b816a3bc2026..89120196a94934e3bb1f201eef9ad7f936ad828b 100644 (file)
@@ -343,7 +343,6 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
        struct sock *sk2;
        const struct hlist_nulls_node *node;
        struct inet_timewait_sock *tw = NULL;
-       int twrefcnt = 0;
 
        spin_lock(lock);
 
@@ -371,21 +370,17 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
        WARN_ON(!sk_unhashed(sk));
        __sk_nulls_add_node_rcu(sk, &head->chain);
        if (tw) {
-               twrefcnt = inet_twsk_unhash(tw);
+               sk_nulls_del_node_init_rcu((struct sock *)tw);
                NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
        }
        spin_unlock(lock);
-       if (twrefcnt)
-               inet_twsk_put(tw);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 
        if (twp) {
                *twp = tw;
        } else if (tw) {
                /* Silly. Should hash-dance instead... */
-               inet_twsk_deschedule(tw);
-
-               inet_twsk_put(tw);
+               inet_twsk_deschedule_put(tw);
        }
        return 0;
 
@@ -403,13 +398,12 @@ static u32 inet_sk_port_offset(const struct sock *sk)
                                          inet->inet_dport);
 }
 
-int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
+void __inet_hash_nolisten(struct sock *sk, struct sock *osk)
 {
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
        struct hlist_nulls_head *list;
        struct inet_ehash_bucket *head;
        spinlock_t *lock;
-       int twrefcnt = 0;
 
        WARN_ON(!sk_unhashed(sk));
 
@@ -420,23 +414,22 @@ int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
 
        spin_lock(lock);
        __sk_nulls_add_node_rcu(sk, list);
-       if (tw) {
-               WARN_ON(sk->sk_hash != tw->tw_hash);
-               twrefcnt = inet_twsk_unhash(tw);
+       if (osk) {
+               WARN_ON(sk->sk_hash != osk->sk_hash);
+               sk_nulls_del_node_init_rcu(osk);
        }
        spin_unlock(lock);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
-       return twrefcnt;
 }
 EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
 
-int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw)
+void __inet_hash(struct sock *sk, struct sock *osk)
 {
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
        struct inet_listen_hashbucket *ilb;
 
        if (sk->sk_state != TCP_LISTEN)
-               return __inet_hash_nolisten(sk, tw);
+               return __inet_hash_nolisten(sk, osk);
 
        WARN_ON(!sk_unhashed(sk));
        ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
@@ -445,7 +438,6 @@ int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw)
        __sk_nulls_add_node_rcu(sk, &ilb->head);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
        spin_unlock(&ilb->lock);
-       return 0;
 }
 EXPORT_SYMBOL(__inet_hash);
 
@@ -492,7 +484,6 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
        struct inet_bind_bucket *tb;
        int ret;
        struct net *net = sock_net(sk);
-       int twrefcnt = 1;
 
        if (!snum) {
                int i, remaining, low, high, port;
@@ -560,19 +551,14 @@ ok:
                inet_bind_hash(sk, tb, port);
                if (sk_unhashed(sk)) {
                        inet_sk(sk)->inet_sport = htons(port);
-                       twrefcnt += __inet_hash_nolisten(sk, tw);
+                       __inet_hash_nolisten(sk, (struct sock *)tw);
                }
                if (tw)
-                       twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
+                       inet_twsk_bind_unhash(tw, hinfo);
                spin_unlock(&head->lock);
 
-               if (tw) {
-                       inet_twsk_deschedule(tw);
-                       while (twrefcnt) {
-                               twrefcnt--;
-                               inet_twsk_put(tw);
-                       }
-               }
+               if (tw)
+                       inet_twsk_deschedule_put(tw);
 
                ret = 0;
                goto out;
@@ -624,22 +610,21 @@ EXPORT_SYMBOL_GPL(inet_hashinfo_init);
 
 int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
 {
+       unsigned int locksz = sizeof(spinlock_t);
        unsigned int i, nblocks = 1;
 
-       if (sizeof(spinlock_t) != 0) {
+       if (locksz != 0) {
                /* allocate 2 cache lines or at least one spinlock per cpu */
-               nblocks = max_t(unsigned int,
-                               2 * L1_CACHE_BYTES / sizeof(spinlock_t),
-                               1);
+               nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
                nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());
 
                /* no more locks than number of hash buckets */
                nblocks = min(nblocks, hashinfo->ehash_mask + 1);
 
-               hashinfo->ehash_locks = kmalloc_array(nblocks, sizeof(spinlock_t),
+               hashinfo->ehash_locks = kmalloc_array(nblocks, locksz,
                                                      GFP_KERNEL | __GFP_NOWARN);
                if (!hashinfo->ehash_locks)
-                       hashinfo->ehash_locks = vmalloc(nblocks * sizeof(spinlock_t));
+                       hashinfo->ehash_locks = vmalloc(nblocks * locksz);
 
                if (!hashinfo->ehash_locks)
                        return -ENOMEM;
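
The lock-table sizing above reads sizeof(spinlock_t) once into locksz and then
computes: at least one lock, enough to fill two cache lines per possible CPU, rounded
up to a power of two, and never more than there are hash buckets. A standalone sketch
of the arithmetic (all constants here are assumed example values):

#include <stdio.h>

/* Round v up to the next power of two (v > 0). */
static unsigned int roundup_pow_of_two(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

int main(void)
{
	unsigned int l1_cache_bytes = 64;	/* assumed L1 line size */
	unsigned int locksz = 4;		/* assumed sizeof(spinlock_t) */
	unsigned int ncpus = 8;			/* assumed possible CPUs */
	unsigned int ehash_buckets = 65536;	/* assumed table size */
	unsigned int nblocks;

	/* two cache lines' worth of locks, or at least one, per cpu */
	nblocks = max_u(2 * l1_cache_bytes / locksz, 1);
	nblocks = roundup_pow_of_two(nblocks * ncpus);

	/* no more locks than number of hash buckets */
	nblocks = min_u(nblocks, ehash_buckets);

	printf("ehash locks: %u (%u bytes)\n", nblocks, nblocks * locksz);
	return 0;
}
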
index 2ffbd16b79e00279235244c3412046062a86fec5..ae22cc24fbe89b32be1f2142450c198e78026851 100644 (file)
 #include <net/ip.h>
 
 
-/**
- *     inet_twsk_unhash - unhash a timewait socket from established hash
- *     @tw: timewait socket
- *
- *     unhash a timewait socket from established hash, if hashed.
- *     ehash lock must be held by caller.
- *     Returns 1 if caller should call inet_twsk_put() after lock release.
- */
-int inet_twsk_unhash(struct inet_timewait_sock *tw)
-{
-       if (hlist_nulls_unhashed(&tw->tw_node))
-               return 0;
-
-       hlist_nulls_del_rcu(&tw->tw_node);
-       sk_nulls_node_init(&tw->tw_node);
-       /*
-        * We cannot call inet_twsk_put() ourself under lock,
-        * caller must call it for us.
-        */
-       return 1;
-}
-
 /**
  *     inet_twsk_bind_unhash - unhash a timewait socket from bind hash
  *     @tw: timewait socket
@@ -48,35 +26,29 @@ int inet_twsk_unhash(struct inet_timewait_sock *tw)
  *     bind hash lock must be held by caller.
- *     Returns 1 if caller should call inet_twsk_put() after lock release.
+ *     Drops the reference which the bind hash held on the timewait socket.
  */
-int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
                          struct inet_hashinfo *hashinfo)
 {
        struct inet_bind_bucket *tb = tw->tw_tb;
 
        if (!tb)
-               return 0;
+               return;
 
        __hlist_del(&tw->tw_bind_node);
        tw->tw_tb = NULL;
        inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
-       /*
-        * We cannot call inet_twsk_put() ourself under lock,
-        * caller must call it for us.
-        */
-       return 1;
+       __sock_put((struct sock *)tw);
 }
 
 /* Must be called with locally disabled BHs. */
 static void inet_twsk_kill(struct inet_timewait_sock *tw)
 {
        struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
-       struct inet_bind_hashbucket *bhead;
-       int refcnt;
-       /* Unlink from established hashes. */
        spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
+       struct inet_bind_hashbucket *bhead;
 
        spin_lock(lock);
-       refcnt = inet_twsk_unhash(tw);
+       sk_nulls_del_node_init_rcu((struct sock *)tw);
        spin_unlock(lock);
 
        /* Disassociate with bind bucket. */
@@ -84,11 +56,9 @@ static void inet_twsk_kill(struct inet_timewait_sock *tw)
                        hashinfo->bhash_size)];
 
        spin_lock(&bhead->lock);
-       refcnt += inet_twsk_bind_unhash(tw, hashinfo);
+       inet_twsk_bind_unhash(tw, hashinfo);
        spin_unlock(&bhead->lock);
 
-       BUG_ON(refcnt >= atomic_read(&tw->tw_refcnt));
-       atomic_sub(refcnt, &tw->tw_refcnt);
        atomic_dec(&tw->tw_dr->tw_count);
        inet_twsk_put(tw);
 }
@@ -235,13 +205,17 @@ EXPORT_SYMBOL_GPL(inet_twsk_alloc);
  * tcp_input.c to verify this.
  */
 
-/* This is for handling early-kills of TIME_WAIT sockets. */
-void inet_twsk_deschedule(struct inet_timewait_sock *tw)
+/* This is for handling early-kills of TIME_WAIT sockets.
+ * Warning: consumes the caller's reference.
+ * The caller must not access tw afterwards.
+ */
+void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
 {
        if (del_timer_sync(&tw->tw_timer))
                inet_twsk_kill(tw);
+       inet_twsk_put(tw);
 }
-EXPORT_SYMBOL(inet_twsk_deschedule);
+EXPORT_SYMBOL(inet_twsk_deschedule_put);
 
 void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
 {
@@ -311,9 +285,8 @@ restart:
 
                        rcu_read_unlock();
                        local_bh_disable();
-                       inet_twsk_deschedule(tw);
+                       inet_twsk_deschedule_put(tw);
                        local_bh_enable();
-                       inet_twsk_put(tw);
                        goto restart_rcu;
                }
                /* If the nulls value we got at the end of this lookup is
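
inet_twsk_deschedule() followed by inet_twsk_put() collapses into
inet_twsk_deschedule_put(), which consumes the caller's reference: after the call, tw
must not be touched. A minimal refcount model of that consume-on-call contract
(simplified: the real function only kills the socket when it wins the timer
cancellation via del_timer_sync()):

#include <stdio.h>
#include <stdlib.h>

struct tw_sock {
	int refcnt;
};

static void tw_put(struct tw_sock *tw)
{
	if (--tw->refcnt == 0) {
		printf("freeing timewait sock\n");
		free(tw);
	}
}

/* Consumes the caller's reference, like inet_twsk_deschedule_put():
 * after this returns, the caller must not dereference tw again.
 */
static void tw_deschedule_put(struct tw_sock *tw)
{
	printf("cancel timer, unhash\n");	/* stands in for inet_twsk_kill() */
	tw_put(tw);
}

int main(void)
{
	struct tw_sock *tw = malloc(sizeof(*tw));

	tw->refcnt = 1;
	tw_deschedule_put(tw);
	/* tw is dangling here; any further use would be a use-after-free */
	return 0;
}
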
index a50dc6d408d11c339b38f2436216c8568c4149cf..d96722ae89796ef27ec725a695b60fbce4c47fbc 100644 (file)
@@ -202,7 +202,7 @@ static void ip_expire(unsigned long arg)
        ipq_kill(qp);
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
 
-       if (!(qp->q.flags & INET_FRAG_EVICTED)) {
+       if (!inet_frag_evicting(&qp->q)) {
                struct sk_buff *head = qp->q.fragments;
                const struct iphdr *iph;
                int err;
@@ -309,7 +309,7 @@ static int ip_frag_reinit(struct ipq *qp)
                kfree_skb(fp);
                fp = xp;
        } while (fp);
-       sub_frag_mem_limit(&qp->q, sum_truesize);
+       sub_frag_mem_limit(qp->q.net, sum_truesize);
 
        qp->q.flags = 0;
        qp->q.len = 0;
@@ -351,7 +351,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
        ihl = ip_hdrlen(skb);
 
        /* Determine the position of this fragment. */
-       end = offset + skb->len - ihl;
+       end = offset + skb->len - skb_network_offset(skb) - ihl;
        err = -EINVAL;
 
        /* Is this the final fragment? */
@@ -381,7 +381,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
                goto err;
 
        err = -ENOMEM;
-       if (!pskb_pull(skb, ihl))
+       if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
                goto err;
 
        err = pskb_trim_rcsum(skb, end - offset);
@@ -455,7 +455,7 @@ found:
                                qp->q.fragments = next;
 
                        qp->q.meat -= free_it->len;
-                       sub_frag_mem_limit(&qp->q, free_it->truesize);
+                       sub_frag_mem_limit(qp->q.net, free_it->truesize);
                        kfree_skb(free_it);
                }
        }
@@ -479,7 +479,7 @@ found:
        qp->q.stamp = skb->tstamp;
        qp->q.meat += skb->len;
        qp->ecn |= ecn;
-       add_frag_mem_limit(&qp->q, skb->truesize);
+       add_frag_mem_limit(qp->q.net, skb->truesize);
        if (offset == 0)
                qp->q.flags |= INET_FRAG_FIRST_IN;
 
@@ -522,7 +522,6 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
        int len;
        int ihlen;
        int err;
-       int sum_truesize;
        u8 ecn;
 
        ipq_kill(qp);
@@ -587,35 +586,22 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                head->len -= clone->len;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
-               add_frag_mem_limit(&qp->q, clone->truesize);
+               add_frag_mem_limit(qp->q.net, clone->truesize);
        }
 
+       skb_shinfo(head)->frag_list = head->next;
        skb_push(head, head->data - skb_network_header(head));
 
-       sum_truesize = head->truesize;
-       for (fp = head->next; fp;) {
-               bool headstolen;
-               int delta;
-               struct sk_buff *next = fp->next;
-
-               sum_truesize += fp->truesize;
+       for (fp = head->next; fp; fp = fp->next) {
+               head->data_len += fp->len;
+               head->len += fp->len;
                if (head->ip_summed != fp->ip_summed)
                        head->ip_summed = CHECKSUM_NONE;
                else if (head->ip_summed == CHECKSUM_COMPLETE)
                        head->csum = csum_add(head->csum, fp->csum);
-
-               if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
-                       kfree_skb_partial(fp, headstolen);
-               } else {
-                       if (!skb_shinfo(head)->frag_list)
-                               skb_shinfo(head)->frag_list = fp;
-                       head->data_len += fp->len;
-                       head->len += fp->len;
-                       head->truesize += fp->truesize;
-               }
-               fp = next;
+               head->truesize += fp->truesize;
        }
-       sub_frag_mem_limit(&qp->q, sum_truesize);
+       sub_frag_mem_limit(qp->q.net, head->truesize);
 
        head->next = NULL;
        head->dev = dev;
@@ -641,6 +627,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                iph->frag_off = 0;
        }
 
+       ip_send_check(iph);
+
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
        qp->q.fragments = NULL;
        qp->q.fragments_tail = NULL;
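
The revert above returns reassembly to frag_list chaining: fragments hang off the head
skb while len, data_len, truesize and csum are accumulated, and ip_send_check()
recomputes the header checksum because frag_off and tot_len were rewritten. A toy
model of the accumulation walk (struct frag is an illustrative stand-in for sk_buff):

#include <stdio.h>
#include <stddef.h>

struct frag {
	struct frag *next;
	unsigned int len;
};

/* Walk the chained fragments and sum their payload lengths, as the
 * reassembly loop does for head->len / head->data_len.
 */
static unsigned int reasm_len(const struct frag *head)
{
	unsigned int total = head->len;
	const struct frag *fp;

	for (fp = head->next; fp; fp = fp->next)
		total += fp->len;
	return total;
}

int main(void)
{
	struct frag f3 = { NULL, 200 };
	struct frag f2 = { &f3, 1480 };
	struct frag f1 = { &f2, 1480 };

	printf("reassembled payload: %u bytes\n", reasm_len(&f1));
	return 0;
}
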
index 2db4c8773c1b405da48758db66969060df2f0812..f4fc8a77aaa79dcb5156bfb2de84efdf24808713 100644 (file)
 #include <net/xfrm.h>
 #include <linux/mroute.h>
 #include <linux/netlink.h>
+#include <net/dst_metadata.h>
 
 /*
  *     Process Router Attention IP option (RFC 2113)
@@ -331,7 +332,7 @@ static int ip_rcv_finish(struct sock *sk, struct sk_buff *skb)
         *      Initialise the virtual path cache for the packet. It describes
         *      how the packet travels inside Linux networking.
         */
-       if (!skb_dst(skb)) {
+       if (!skb_valid_dst(skb)) {
                int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
                                               iph->tos, skb->dev);
                if (unlikely(err)) {
index 6a51a71a6c67a0f3e48523a37e3b559306885de9..5512f4e4ec1b1b629a9c1fe1c7ac2ac4ffced248 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/etherdevice.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
+#include <linux/static_key.h>
 
 #include <net/ip.h>
 #include <net/icmp.h>
@@ -190,3 +191,123 @@ struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
        return tot;
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
+
+static const struct nla_policy ip_tun_policy[IP_TUN_MAX + 1] = {
+       [IP_TUN_ID]             = { .type = NLA_U64 },
+       [IP_TUN_DST]            = { .type = NLA_U32 },
+       [IP_TUN_SRC]            = { .type = NLA_U32 },
+       [IP_TUN_TTL]            = { .type = NLA_U8 },
+       [IP_TUN_TOS]            = { .type = NLA_U8 },
+       [IP_TUN_SPORT]          = { .type = NLA_U16 },
+       [IP_TUN_DPORT]          = { .type = NLA_U16 },
+       [IP_TUN_FLAGS]          = { .type = NLA_U16 },
+};
+
+static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
+                             struct lwtunnel_state **ts)
+{
+       struct ip_tunnel_info *tun_info;
+       struct lwtunnel_state *new_state;
+       struct nlattr *tb[IP_TUN_MAX + 1];
+       int err;
+
+       err = nla_parse_nested(tb, IP_TUN_MAX, attr, ip_tun_policy);
+       if (err < 0)
+               return err;
+
+       new_state = lwtunnel_state_alloc(sizeof(*tun_info));
+       if (!new_state)
+               return -ENOMEM;
+
+       new_state->type = LWTUNNEL_ENCAP_IP;
+
+       tun_info = lwt_tun_info(new_state);
+
+       if (tb[IP_TUN_ID])
+               tun_info->key.tun_id = nla_get_u64(tb[IP_TUN_ID]);
+
+       if (tb[IP_TUN_DST])
+               tun_info->key.ipv4_dst = nla_get_be32(tb[IP_TUN_DST]);
+
+       if (tb[IP_TUN_SRC])
+               tun_info->key.ipv4_src = nla_get_be32(tb[IP_TUN_SRC]);
+
+       if (tb[IP_TUN_TTL])
+               tun_info->key.ipv4_ttl = nla_get_u8(tb[IP_TUN_TTL]);
+
+       if (tb[IP_TUN_TOS])
+               tun_info->key.ipv4_tos = nla_get_u8(tb[IP_TUN_TOS]);
+
+       if (tb[IP_TUN_SPORT])
+               tun_info->key.tp_src = nla_get_be16(tb[IP_TUN_SPORT]);
+
+       if (tb[IP_TUN_DPORT])
+               tun_info->key.tp_dst = nla_get_be16(tb[IP_TUN_DPORT]);
+
+       if (tb[IP_TUN_FLAGS])
+               tun_info->key.tun_flags = nla_get_u16(tb[IP_TUN_FLAGS]);
+
+       tun_info->mode = IP_TUNNEL_INFO_TX;
+       tun_info->options = NULL;
+       tun_info->options_len = 0;
+
+       *ts = new_state;
+
+       return 0;
+}
+
+static int ip_tun_fill_encap_info(struct sk_buff *skb,
+                                 struct lwtunnel_state *lwtstate)
+{
+       struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
+
+       if (nla_put_u64(skb, IP_TUN_ID, tun_info->key.tun_id) ||
+           nla_put_be32(skb, IP_TUN_DST, tun_info->key.ipv4_dst) ||
+           nla_put_be32(skb, IP_TUN_SRC, tun_info->key.ipv4_src) ||
+           nla_put_u8(skb, IP_TUN_TOS, tun_info->key.ipv4_tos) ||
+           nla_put_u8(skb, IP_TUN_TTL, tun_info->key.ipv4_ttl) ||
+           nla_put_u16(skb, IP_TUN_SPORT, tun_info->key.tp_src) ||
+           nla_put_u16(skb, IP_TUN_DPORT, tun_info->key.tp_dst) ||
+           nla_put_u16(skb, IP_TUN_FLAGS, tun_info->key.tun_flags))
+               return -ENOMEM;
+
+       return 0;
+}
+
+static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+       return nla_total_size(8)        /* IP_TUN_ID */
+               + nla_total_size(4)     /* IP_TUN_DST */
+               + nla_total_size(4)     /* IP_TUN_SRC */
+               + nla_total_size(1)     /* IP_TUN_TOS */
+               + nla_total_size(1)     /* IP_TUN_TTL */
+               + nla_total_size(2)     /* IP_TUN_SPORT */
+               + nla_total_size(2)     /* IP_TUN_DPORT */
+               + nla_total_size(2);    /* IP_TUN_FLAGS */
+}
+
+static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
+       .build_state = ip_tun_build_state,
+       .fill_encap = ip_tun_fill_encap_info,
+       .get_encap_size = ip_tun_encap_nlsize,
+};
+
+void __init ip_tunnel_core_init(void)
+{
+       lwtunnel_encap_add_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP);
+}
+
+struct static_key ip_tunnel_metadata_cnt = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL(ip_tunnel_metadata_cnt);
+
+void ip_tunnel_need_metadata(void)
+{
+       static_key_slow_inc(&ip_tunnel_metadata_cnt);
+}
+EXPORT_SYMBOL_GPL(ip_tunnel_need_metadata);
+
+void ip_tunnel_unneed_metadata(void)
+{
+       static_key_slow_dec(&ip_tunnel_metadata_cnt);
+}
+EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata);
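
ip_tunnel_need_metadata()/ip_tunnel_unneed_metadata() reference-count a static key so
the metadata-collection branch stays patched out of the fast path until the first user
registers. A userspace sketch of that gating (a plain counter stands in for the
jump-label; in the kernel the disabled branch costs a patched-out no-op, not a load
and compare):

#include <stdio.h>

/* Plain counter standing in for struct static_key. */
static int metadata_users;

static void need_metadata(void)   { metadata_users++; }
static void unneed_metadata(void) { metadata_users--; }

static void rx_fast_path(void)
{
	if (metadata_users > 0)		/* static_key_false() in the kernel */
		printf("collect tunnel metadata\n");
	else
		printf("skip metadata path\n");
}

int main(void)
{
	rx_fast_path();		/* no users: branch skipped */
	need_metadata();
	rx_fast_path();		/* one user: branch taken */
	unneed_metadata();
	rx_fast_path();
	return 0;
}
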
index 92305a1a021a7936206f9078008532bc151ec757..c416cb355cb0cdc6932e32ee7b93cfe8680bf0e5 100644 (file)
@@ -240,7 +240,7 @@ get_entry(const void *base, unsigned int offset)
        return (struct arpt_entry *)(base + offset);
 }
 
-static inline __pure
+static inline
 struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry)
 {
        return (void *)entry + entry->next_offset;
@@ -280,6 +280,9 @@ unsigned int arpt_do_table(struct sk_buff *skb,
        table_base = private->entries;
        jumpstack  = (struct arpt_entry **)private->jumpstack[cpu];
 
+       /* No TEE support for arptables, so no need to switch to alternate
+        * stack.  All targets that reenter must return absolute verdicts.
+        */
        e = get_entry(table_base, private->hook_entry[hook]);
 
        acpar.in      = state->in;
@@ -325,11 +328,6 @@ unsigned int arpt_do_table(struct sk_buff *skb,
                        }
                        if (table_base + v
                            != arpt_next_entry(e)) {
-
-                               if (stackidx >= private->stacksize) {
-                                       verdict = NF_DROP;
-                                       break;
-                               }
                                jumpstack[stackidx++] = e;
                        }
 
@@ -337,9 +335,6 @@ unsigned int arpt_do_table(struct sk_buff *skb,
                        continue;
                }
 
-               /* Targets which reenter must return
-                * abs. verdicts
-                */
                acpar.target   = t->u.kernel.target;
                acpar.targinfo = t->data;
                verdict = t->u.kernel.target->target(skb, &acpar);
@@ -372,10 +367,13 @@ static inline bool unconditional(const struct arpt_arp *arp)
 
 /* Figures out from what hook each rule can be called: returns 0 if
  * there are loops.  Puts hook bitmask in comefrom.
+ *
+ * Keeps track of largest call depth seen and stores it in newinfo->stacksize.
  */
-static int mark_source_chains(const struct xt_table_info *newinfo,
+static int mark_source_chains(struct xt_table_info *newinfo,
                              unsigned int valid_hooks, void *entry0)
 {
+       unsigned int calldepth, max_calldepth = 0;
        unsigned int hook;
 
        /* No recursion; use packet counter to save back ptrs (reset
@@ -391,6 +389,7 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
 
                /* Set initial back pointer. */
                e->counters.pcnt = pos;
+               calldepth = 0;
 
                for (;;) {
                        const struct xt_standard_target *t
@@ -445,6 +444,8 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
                                        (entry0 + pos + size);
                                e->counters.pcnt = pos;
                                pos += size;
+                               if (calldepth > 0)
+                                       --calldepth;
                        } else {
                                int newpos = t->verdict;
 
@@ -459,6 +460,10 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
                                                return 0;
                                        }
 
+                                       if (entry0 + newpos != arpt_next_entry(e) &&
+                                           ++calldepth > max_calldepth)
+                                               max_calldepth = calldepth;
+
                                        /* This a jump; chase it. */
                                        duprintf("Jump rule %u -> %u\n",
                                                 pos, newpos);
@@ -475,6 +480,7 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
                next:
                duprintf("Finished chain %u\n", hook);
        }
+       newinfo->stacksize = max_calldepth;
        return 1;
 }
 
@@ -664,9 +670,6 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
                if (ret != 0)
                        break;
                ++i;
-               if (strcmp(arpt_get_target(iter)->u.user.name,
-                   XT_ERROR_TARGET) == 0)
-                       ++newinfo->stacksize;
        }
        duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
        if (ret != 0)
@@ -1439,9 +1442,6 @@ static int translate_compat_table(const char *name,
                        break;
                }
                ++i;
-               if (strcmp(arpt_get_target(iter1)->u.user.name,
-                   XT_ERROR_TARGET) == 0)
-                       ++newinfo->stacksize;
        }
        if (ret) {
                /*
index 6c72fbb7b49eb97d3574fd8c10174f9580752911..787f99ed55e266b209598a496b5df71be20913ad 100644 (file)
@@ -276,7 +276,7 @@ static void trace_packet(const struct sk_buff *skb,
 }
 #endif
 
-static inline __pure
+static inline
 struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
 {
        return (void *)entry + entry->next_offset;
@@ -296,12 +296,13 @@ ipt_do_table(struct sk_buff *skb,
        const char *indev, *outdev;
        const void *table_base;
        struct ipt_entry *e, **jumpstack;
-       unsigned int *stackptr, origptr, cpu;
+       unsigned int stackidx, cpu;
        const struct xt_table_info *private;
        struct xt_action_param acpar;
        unsigned int addend;
 
        /* Initialization */
+       stackidx = 0;
        ip = ip_hdr(skb);
        indev = state->in ? state->in->name : nulldevname;
        outdev = state->out ? state->out->name : nulldevname;
@@ -331,13 +332,21 @@ ipt_do_table(struct sk_buff *skb,
        smp_read_barrier_depends();
        table_base = private->entries;
        jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
-       stackptr   = per_cpu_ptr(private->stackptr, cpu);
-       origptr    = *stackptr;
+
+       /* Switch to alternate jumpstack if we're being invoked via TEE.
+        * TEE issues XT_CONTINUE verdict on original skb so we must not
+        * clobber the jumpstack.
+        *
+        * For recursion via REJECT or SYNPROXY the stack will be clobbered
+        * but that is no problem since these issue absolute verdicts.
+        */
+       if (static_key_false(&xt_tee_enabled))
+               jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
 
        e = get_entry(table_base, private->hook_entry[hook]);
 
-       pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
-                table->name, hook, origptr,
+       pr_debug("Entering %s(hook %u), UF %p\n",
+                table->name, hook,
                 get_entry(table_base, private->underflow[hook]));
 
        do {
@@ -383,28 +392,24 @@ ipt_do_table(struct sk_buff *skb,
                                        verdict = (unsigned int)(-v) - 1;
                                        break;
                                }
-                               if (*stackptr <= origptr) {
+                               if (stackidx == 0) {
                                        e = get_entry(table_base,
                                            private->underflow[hook]);
                                        pr_debug("Underflow (this is normal) "
                                                 "to %p\n", e);
                                } else {
-                                       e = jumpstack[--*stackptr];
+                                       e = jumpstack[--stackidx];
                                        pr_debug("Pulled %p out from pos %u\n",
-                                                e, *stackptr);
+                                                e, stackidx);
                                        e = ipt_next_entry(e);
                                }
                                continue;
                        }
                        if (table_base + v != ipt_next_entry(e) &&
                            !(e->ip.flags & IPT_F_GOTO)) {
-                               if (*stackptr >= private->stacksize) {
-                                       verdict = NF_DROP;
-                                       break;
-                               }
-                               jumpstack[(*stackptr)++] = e;
+                               jumpstack[stackidx++] = e;
                                pr_debug("Pushed %p into pos %u\n",
-                                        e, *stackptr - 1);
+                                        e, stackidx - 1);
                        }
 
                        e = get_entry(table_base, v);
@@ -423,9 +428,8 @@ ipt_do_table(struct sk_buff *skb,
                        /* Verdict */
                        break;
        } while (!acpar.hotdrop);
-       pr_debug("Exiting %s; resetting sp from %u to %u\n",
-                __func__, *stackptr, origptr);
-       *stackptr = origptr;
+       pr_debug("Exiting %s; sp at %u\n", __func__, stackidx);
+
        xt_write_recseq_end(addend);
        local_bh_enable();
 
@@ -439,11 +443,15 @@ ipt_do_table(struct sk_buff *skb,
 }
 
 /* Figures out from what hook each rule can be called: returns 0 if
-   there are loops.  Puts hook bitmask in comefrom. */
+ * there are loops.  Puts hook bitmask in comefrom.
+ *
+ * Keeps track of largest call depth seen and stores it in newinfo->stacksize.
+ */
 static int
-mark_source_chains(const struct xt_table_info *newinfo,
+mark_source_chains(struct xt_table_info *newinfo,
                   unsigned int valid_hooks, void *entry0)
 {
+       unsigned int calldepth, max_calldepth = 0;
        unsigned int hook;
 
        /* No recursion; use packet counter to save back ptrs (reset
@@ -457,6 +465,7 @@ mark_source_chains(const struct xt_table_info *newinfo,
 
                /* Set initial back pointer. */
                e->counters.pcnt = pos;
+               calldepth = 0;
 
                for (;;) {
                        const struct xt_standard_target *t
@@ -518,6 +527,9 @@ mark_source_chains(const struct xt_table_info *newinfo,
                                        (entry0 + pos + size);
                                e->counters.pcnt = pos;
                                pos += size;
+                               WARN_ON_ONCE(calldepth == 0);
+                               if (calldepth > 0)
+                                       --calldepth;
                        } else {
                                int newpos = t->verdict;
 
@@ -531,9 +543,14 @@ mark_source_chains(const struct xt_table_info *newinfo,
                                                                newpos);
                                                return 0;
                                        }
+                                       if (entry0 + newpos != ipt_next_entry(e) &&
+                                           !(e->ip.flags & IPT_F_GOTO) &&
+                                           ++calldepth > max_calldepth)
+                                               max_calldepth = calldepth;
+
                                        /* This a jump; chase it. */
-                                       duprintf("Jump rule %u -> %u\n",
-                                                pos, newpos);
+                                       duprintf("Jump rule %u -> %u, calldepth %d\n",
+                                                pos, newpos, calldepth);
                                } else {
                                        /* ... this is a fallthru */
                                        newpos = pos + e->next_offset;
@@ -547,6 +564,7 @@ mark_source_chains(const struct xt_table_info *newinfo,
                next:
                duprintf("Finished chain %u\n", hook);
        }
+       newinfo->stacksize = max_calldepth;
        return 1;
 }
 
@@ -826,9 +844,6 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
                if (ret != 0)
                        return ret;
                ++i;
-               if (strcmp(ipt_get_target(iter)->u.user.name,
-                   XT_ERROR_TARGET) == 0)
-                       ++newinfo->stacksize;
        }
 
        if (i != repl->num_entries) {
@@ -1744,9 +1759,6 @@ translate_compat_table(struct net *net,
                if (ret != 0)
                        break;
                ++i;
-               if (strcmp(ipt_get_target(iter1)->u.user.name,
-                   XT_ERROR_TARGET) == 0)
-                       ++newinfo->stacksize;
        }
        if (ret) {
                /*
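
With stacksize derived from the deepest jump nesting that mark_source_chains() sees at
table-load time, ipt_do_table() no longer needs a per-packet overflow check on the
jumpstack. A standalone sketch of the depth bookkeeping over a toy trace of jumps into
user chains (+1) and returns from them (-1):

#include <stdio.h>

int main(void)
{
	/* Example chain walk: jump, jump, return, jump, jump, return x3. */
	static const int trace[] = { +1, +1, -1, +1, +1, -1, -1, -1 };
	unsigned int calldepth = 0, max_calldepth = 0;
	unsigned int i;

	for (i = 0; i < sizeof(trace) / sizeof(trace[0]); i++) {
		if (trace[i] > 0) {
			/* a jump into a chain deepens the call stack */
			if (++calldepth > max_calldepth)
				max_calldepth = calldepth;
		} else if (calldepth > 0) {
			/* a RETURN pops one level */
			--calldepth;
		}
	}
	printf("stacksize = %u\n", max_calldepth);	/* prints 3 */
	return 0;
}

Sizing the stack once at verification time is what lets the packet path drop the
NF_DROP-on-overflow branch that the old counting of ERROR targets required.
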
index c88b7d4347187260e789612c44f34059092dc41d..b69e82bda2159464b2eb0b0fd7c184c605947cc8 100644 (file)
@@ -49,12 +49,9 @@ static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
        if (skb->nfct)
                zone = nf_ct_zone((struct nf_conn *)skb->nfct);
 #endif
-
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-       if (skb->nf_bridge &&
-           skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
+       if (nf_bridge_in_prerouting(skb))
                return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
-#endif
+
        if (hooknum == NF_INET_PRE_ROUTING)
                return IP_DEFRAG_CONNTRACK_IN + zone;
        else
index 05ff44b758dfee1e02996a3726ac63854a96ad16..e89094ab5ddb8ce2b6eb2d78a9a9046b42287bd5 100644
@@ -363,7 +363,8 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
                                                    scoped);
                rcu_read_unlock();
 
-               if (!(isk->freebind || isk->transparent || has_addr ||
+               if (!(net->ipv6.sysctl.ip_nonlocal_bind ||
+                     isk->freebind || isk->transparent || has_addr ||
                      addr_type == IPV6_ADDR_ANY))
                        return -EADDRNOTAVAIL;
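
With the ip_nonlocal_bind sysctl now honored here, an ICMPv6 ping socket may bind to an address that is not (yet) configured locally once the knob is enabled (e.g. sysctl -w net.ipv6.ip_nonlocal_bind=1); freebind and transparent sockets keep working as before.
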
 
index da5d483e236ac1e37b631c6091219fbefbe497b4..3abd9d7a3adf323bd688b1ab6dabda1248c67be1 100644
@@ -300,6 +300,8 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TCPACKSkippedChallenge", LINUX_MIB_TCPACKSKIPPEDCHALLENGE),
        SNMP_MIB_ITEM("TCPWinProbe", LINUX_MIB_TCPWINPROBE),
        SNMP_MIB_ITEM("TCPKeepAlive", LINUX_MIB_TCPKEEPALIVE),
+       SNMP_MIB_ITEM("TCPMTUPFail", LINUX_MIB_TCPMTUPFAIL),
+       SNMP_MIB_ITEM("TCPMTUPSuccess", LINUX_MIB_TCPMTUPSUCCESS),
        SNMP_MIB_SENTINEL
 };
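
The two new TcpExt counters pair up path-MTU-probe outcomes: LINUX_MIB_TCPMTUPFAIL is bumped when a probe fails and LINUX_MIB_TCPMTUPSUCCESS when one is accepted (see the tcp_mtup_probe_*() hunks further down). Both surface in /proc/net/netstat, e.g. via nstat -az | grep TCPMTUP.
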
 
index d0362a2de3d3805260c878f5e3a9341e225cade9..18fd7c9095c706c13240b4624aa291ead0b0171a 100644
@@ -91,6 +91,7 @@
 #include <linux/slab.h>
 #include <linux/jhash.h>
 #include <net/dst.h>
+#include <net/dst_metadata.h>
 #include <net/net_namespace.h>
 #include <net/protocol.h>
 #include <net/ip.h>
 #include <net/tcp.h>
 #include <net/icmp.h>
 #include <net/xfrm.h>
+#include <net/lwtunnel.h>
 #include <net/netevent.h>
 #include <net/rtnetlink.h>
 #ifdef CONFIG_SYSCTL
 #include <linux/kmemleak.h>
 #endif
 #include <net/secure_seq.h>
+#include <net/ip_tunnels.h>
 
 #define RT_FL_TOS(oldflp4) \
        ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
@@ -1355,6 +1358,7 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
                list_del(&rt->rt_uncached);
                spin_unlock_bh(&ul->lock);
        }
+       lwtstate_put(rt->rt_lwtstate);
 }
 
 void rt_flush_dev(struct net_device *dev)
@@ -1403,6 +1407,7 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
 #ifdef CONFIG_IP_ROUTE_CLASSID
                rt->dst.tclassid = nh->nh_tclassid;
 #endif
+               rt->rt_lwtstate = lwtstate_get(nh->nh_lwtstate);
                if (unlikely(fnhe))
                        cached = rt_bind_exception(rt, fnhe, daddr);
                else if (!(rt->dst.flags & DST_NOCACHE))
@@ -1488,6 +1493,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        rth->rt_gateway = 0;
        rth->rt_uses_gateway = 0;
        INIT_LIST_HEAD(&rth->rt_uncached);
+       rth->rt_lwtstate = NULL;
        if (our) {
                rth->dst.input= ip_local_deliver;
                rth->rt_flags |= RTCF_LOCAL;
@@ -1546,7 +1552,6 @@ static int __mkroute_input(struct sk_buff *skb,
        struct rtable *rth;
        int err;
        struct in_device *out_dev;
-       unsigned int flags = 0;
        bool do_cache;
        u32 itag = 0;
 
@@ -1610,7 +1615,7 @@ static int __mkroute_input(struct sk_buff *skb,
        }
 
        rth->rt_genid = rt_genid_ipv4(dev_net(rth->dst.dev));
-       rth->rt_flags = flags;
+       rth->rt_flags = 0;
        rth->rt_type = res->type;
        rth->rt_is_input = 1;
        rth->rt_iif     = 0;
@@ -1618,12 +1623,15 @@ static int __mkroute_input(struct sk_buff *skb,
        rth->rt_gateway = 0;
        rth->rt_uses_gateway = 0;
        INIT_LIST_HEAD(&rth->rt_uncached);
+       rth->rt_lwtstate = NULL;
        RT_CACHE_STAT_INC(in_slow_tot);
 
        rth->dst.input = ip_forward;
        rth->dst.output = ip_output;
 
        rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
+       if (lwtunnel_output_redirect(rth->rt_lwtstate))
+               rth->dst.output = lwtunnel_output;
        skb_dst_set(skb, &rth->dst);
 out:
        err = 0;
@@ -1662,6 +1670,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 {
        struct fib_result res;
        struct in_device *in_dev = __in_dev_get_rcu(dev);
+       struct ip_tunnel_info *tun_info;
        struct flowi4   fl4;
        unsigned int    flags = 0;
        u32             itag = 0;
@@ -1679,6 +1688,13 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
           by fib_lookup.
         */
 
+       tun_info = skb_tunnel_info(skb, AF_INET);
+       if (tun_info && tun_info->mode == IP_TUNNEL_INFO_RX)
+               fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
+       else
+               fl4.flowi4_tun_key.tun_id = 0;
+       skb_dst_drop(skb);
+
        if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
                goto martian_source;
 
@@ -1792,6 +1808,8 @@ local_input:
        rth->rt_gateway = 0;
        rth->rt_uses_gateway = 0;
        INIT_LIST_HEAD(&rth->rt_uncached);
+       rth->rt_lwtstate = NULL;
+
        RT_CACHE_STAT_INC(in_slow_tot);
        if (res.type == RTN_UNREACHABLE) {
                rth->dst.input= ip_error;
@@ -1981,7 +1999,7 @@ add:
        rth->rt_gateway = 0;
        rth->rt_uses_gateway = 0;
        INIT_LIST_HEAD(&rth->rt_uncached);
-
+       rth->rt_lwtstate = NULL;
        RT_CACHE_STAT_INC(out_slow_tot);
 
        if (flags & RTCF_LOCAL)
@@ -2004,6 +2022,8 @@ add:
        }
 
        rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
+       if (lwtunnel_output_redirect(rth->rt_lwtstate))
+               rth->dst.output = lwtunnel_output;
 
        return rth;
 }
@@ -2176,7 +2196,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
        if (!res.prefixlen &&
            res.table->tb_num_default > 1 &&
            res.type == RTN_UNICAST && !fl4->flowi4_oif)
-               fib_select_default(&res);
+               fib_select_default(fl4, &res);
 
        if (!fl4->saddr)
                fl4->saddr = FIB_RES_PREFSRC(net, res);
@@ -2261,7 +2281,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
                rt->rt_uses_gateway = ort->rt_uses_gateway;
 
                INIT_LIST_HEAD(&rt->rt_uncached);
-
+               rt->rt_lwtstate = NULL;
                dst_free(new);
        }
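
The IPv4 routing hunks wire light-weight tunnel state through the rtable: every constructor either takes a reference with lwtstate_get() in rt_set_nexthop() or initializes rt_lwtstate to NULL, ipv4_dst_destroy() drops the reference with lwtstate_put(), and paths whose state requests an output redirect swap dst.output for lwtunnel_output(). The skb_tunnel_info() lookup added to ip_route_input_slow() feeds the tunnel id into the flow key so the FIB lookup can match on it.
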
 
index 7f4056785accb76eec60e22dc0bb19febc98f75f..45534a5ab43065307bfe2708a5ae08936cc5969a 100644
@@ -780,7 +780,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
                                ret = -EAGAIN;
                                break;
                        }
-                       sk_wait_data(sk, &timeo);
+                       sk_wait_data(sk, &timeo, NULL);
                        if (signal_pending(current)) {
                                ret = sock_intr_errno(timeo);
                                break;
@@ -1575,7 +1575,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
        int target;             /* Read at least this many bytes */
        long timeo;
        struct task_struct *user_recv = NULL;
-       struct sk_buff *skb;
+       struct sk_buff *skb, *last;
        u32 urg_hole = 0;
 
        if (unlikely(flags & MSG_ERRQUEUE))
@@ -1635,7 +1635,9 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 
                /* Next get a buffer. */
 
+               last = skb_peek_tail(&sk->sk_receive_queue);
                skb_queue_walk(&sk->sk_receive_queue, skb) {
+                       last = skb;
                        /* Now that we have two receive queues this
                         * shouldn't happen.
                         */
@@ -1754,8 +1756,9 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                        /* Do not sleep, just process backlog. */
                        release_sock(sk);
                        lock_sock(sk);
-               } else
-                       sk_wait_data(sk, &timeo);
+               } else {
+                       sk_wait_data(sk, &timeo, last);
+               }
 
                if (user_recv) {
                        int chunk;
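
Passing the receive-queue tail down lets sk_wait_data() sleep until skb_peek_tail() moves past the skb the caller had already scanned, so tcp_recvmsg() and tcp_splice_read() wake only when the queue actually grew, not on any state change.
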
index c037644eafb7caadcb196b1c8b676bbc42abdb93..fd1405d37c149309882742fb12b07331e7282a95 100644
@@ -146,7 +146,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (!tcp_is_cwnd_limited(sk))
                return;
 
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
+       if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
        else {
                bictcp_update(ca, tp->snd_cwnd);
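
This and the analogous hunks in the other congestion modules below swap open-coded ssthresh comparisons for the tcp_in_slow_start() helper, which reads as tp->snd_cwnd < tp->snd_ssthresh. Note the strict comparison: together with the tcp_slow_start() clamp later in this diff, a flow sitting exactly at ssthresh is treated as being in congestion avoidance everywhere.
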
index 8c6fd3d5e40feeb3c0b422d0e697e1a674b4f576..167b6a3e1b9868c88e5553b114556ae312dfb99f 100644
@@ -264,7 +264,7 @@ static void tcp_cdg_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        u32 prior_snd_cwnd;
        u32 incr;
 
-       if (tp->snd_cwnd < tp->snd_ssthresh && hystart_detect)
+       if (tcp_in_slow_start(tp) && hystart_detect)
                tcp_cdg_hystart_update(sk);
 
        if (after(ack, ca->rtt_seq) && ca->rtt.v64) {
index 84be008c945c654b692211b943f83e909a622516..a2ed23c595cf185cadbebcdf19e801012a64250a 100644
@@ -365,10 +365,8 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
  */
 u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
 {
-       u32 cwnd = tp->snd_cwnd + acked;
+       u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);
 
-       if (cwnd > tp->snd_ssthresh)
-               cwnd = tp->snd_ssthresh + 1;
        acked -= cwnd - tp->snd_cwnd;
        tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
 
@@ -413,7 +411,7 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                return;
 
        /* In "safe" area, increase. */
-       if (tp->snd_cwnd <= tp->snd_ssthresh) {
+       if (tcp_in_slow_start(tp)) {
                acked = tcp_slow_start(tp, acked);
                if (!acked)
                        return;
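
tcp_slow_start() now clamps the new window at snd_ssthresh exactly instead of ssthresh + 1, and hands the leftover ACKed segments to the congestion-avoidance phase. A stand-alone rendering of the arithmetic (user-space sketch; snd_cwnd_clamp is omitted for brevity):

    /* With *snd_cwnd = 10, ssthresh = 12 and acked = 5: cwnd stops at 12
     * and 3 segments remain for the CA phase. */
    static unsigned int slow_start(unsigned int *snd_cwnd,
                                   unsigned int ssthresh, unsigned int acked)
    {
            unsigned int cwnd = *snd_cwnd + acked;

            if (cwnd > ssthresh)
                    cwnd = ssthresh;
            acked -= cwnd - *snd_cwnd;
            *snd_cwnd = cwnd;
            return acked;
    }
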
index 06d3d665a9fd1bfda5688907a284de83697273f6..28011fb1f4a2104a34f81fc0c9fb4a4382bdadac 100644
@@ -320,7 +320,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (!tcp_is_cwnd_limited(sk))
                return;
 
-       if (tp->snd_cwnd <= tp->snd_ssthresh) {
+       if (tcp_in_slow_start(tp)) {
                if (hystart && after(ack, ca->end_seq))
                        bictcp_hystart_reset(sk);
                acked = tcp_slow_start(tp, acked);
@@ -439,7 +439,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
                ca->delay_min = delay;
 
        /* hystart triggers when cwnd is larger than some threshold */
-       if (hystart && tp->snd_cwnd <= tp->snd_ssthresh &&
+       if (hystart && tcp_in_slow_start(tp) &&
            tp->snd_cwnd >= hystart_low_window)
                hystart_update(sk, delay);
 }
index 882c08aae2f58d02bb78212a4eba4d25d7e9c123..db7842495a641829a8725cb436ed2fb3aa5d53e4 100644
@@ -116,7 +116,7 @@ static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (!tcp_is_cwnd_limited(sk))
                return;
 
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
+       if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
        else {
                /* Update AIMD parameters.
index 58469fff6c18fd444c95366caa04ab60965d654a..82f0d9ed60f50f27854fdb62a95281beed9df819 100644
@@ -236,7 +236,7 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (!tcp_is_cwnd_limited(sk))
                return;
 
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
+       if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
        else {
                /* In dangerous area, increase slowly.
index f963b274f2b0436755ebe8bb5586b1ec9682c336..083831e359df92ca9ba0fe7dd5a7a76fe41a94b0 100644
@@ -112,7 +112,7 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 
        rho_fractions = ca->rho_3ls - (ca->rho << 3);
 
-       if (tp->snd_cwnd < tp->snd_ssthresh) {
+       if (tcp_in_slow_start(tp)) {
                /*
                 * slow start
                 *      INC = 2^RHO - 1
index f71002e4db0ba7fe8dfe35bb2196bbaae751ed59..2ab9bbb6faffb799560df98b093d4cbc1207d816 100644
@@ -268,7 +268,7 @@ static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                return;
 
        /* In slow start */
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
+       if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
 
        else {
index 684f095d196e20333adb235fc96a8fb8f0dd691c..4e4d6bcd0ca973226b9ebcf233f777ab3c68be7e 100644
@@ -109,6 +109,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
 #define FLAG_SYN_ACKED         0x10 /* This ACK acknowledged SYN.              */
 #define FLAG_DATA_SACKED       0x20 /* New SACK.                               */
 #define FLAG_ECE               0x40 /* ECE in this ACK                         */
+#define FLAG_LOST_RETRANS      0x80 /* This ACK marks some retransmission lost */
 #define FLAG_SLOWPATH          0x100 /* Do not skip RFC checks for window update.*/
 #define FLAG_ORIG_SACK_ACKED   0x200 /* Never retransmitted data are (s)acked  */
 #define FLAG_SND_UNA_ADVANCED  0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
@@ -196,11 +197,13 @@ static void tcp_enter_quickack_mode(struct sock *sk)
  * and the session is not interactive.
  */
 
-static inline bool tcp_in_quickack_mode(const struct sock *sk)
+static bool tcp_in_quickack_mode(struct sock *sk)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
+       const struct dst_entry *dst = __sk_dst_get(sk);
 
-       return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
+       return (dst && dst_metric(dst, RTAX_QUICKACK)) ||
+               (icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong);
 }
 
 static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
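
tcp_in_quickack_mode() now also honors the per-route RTAX_QUICKACK metric, so quickack behavior can be forced from user space (with an iproute2 that knows the metric, something like ip route change 192.0.2.0/24 dev eth0 quickack 1, addresses and device being placeholders). The deletions further down in tcp_fin() and tcp_event_data_sent() are the other half of this consolidation.
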
@@ -1037,7 +1040,7 @@ static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
  * highest SACK block). Also calculate the lowest snd_nxt among the remaining
  * retransmitted skbs to avoid some costly processing per ACKs.
  */
-static void tcp_mark_lost_retrans(struct sock *sk)
+static void tcp_mark_lost_retrans(struct sock *sk, int *flag)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
@@ -1078,7 +1081,7 @@ static void tcp_mark_lost_retrans(struct sock *sk)
                if (after(received_upto, ack_seq)) {
                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
                        tp->retrans_out -= tcp_skb_pcount(skb);
-
+                       *flag |= FLAG_LOST_RETRANS;
                        tcp_skb_mark_lost_uncond_verify(tp, skb);
                        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
                } else {
@@ -1818,7 +1821,7 @@ advance_sp:
            ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
                tcp_update_reordering(sk, tp->fackets_out - state->reord, 0);
 
-       tcp_mark_lost_retrans(sk);
+       tcp_mark_lost_retrans(sk, &state->flag);
        tcp_verify_left_out(tp);
 out:
 
@@ -1917,14 +1920,13 @@ void tcp_enter_loss(struct sock *sk)
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
-       bool new_recovery = false;
+       bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
        bool is_reneg;                  /* is receiver reneging on SACKs? */
 
        /* Reduce ssthresh if it has not yet been made inside this window. */
        if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
            !after(tp->high_seq, tp->snd_una) ||
            (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
-               new_recovery = true;
                tp->prior_ssthresh = tcp_current_ssthresh(sk);
                tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
                tcp_ca_event(sk, CA_EVENT_LOSS);
@@ -2475,15 +2477,14 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
        return false;
 }
 
-/* The cwnd reduction in CWR and Recovery use the PRR algorithm
- * https://datatracker.ietf.org/doc/draft-ietf-tcpm-proportional-rate-reduction/
+/* The cwnd reduction in CWR and Recovery uses the PRR algorithm in RFC 6937.
  * It computes the number of packets to send (sndcnt) based on packets newly
  * delivered:
  *   1) If the packets in flight is larger than ssthresh, PRR spreads the
  *     cwnd reductions across a full RTT.
- *   2) If packets in flight is lower than ssthresh (such as due to excess
- *     losses and/or application stalls), do not perform any further cwnd
- *     reductions, but instead slow start up to ssthresh.
+ *   2) Otherwise PRR uses packet conservation to send as much as delivered.
+ *      But when the retransmits are acked without further losses, PRR
+ *      slow starts cwnd up to ssthresh to speed up the recovery.
  */
 static void tcp_init_cwnd_reduction(struct sock *sk)
 {
@@ -2500,7 +2501,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk)
 }
 
 static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
-                              int fast_rexmit)
+                              int fast_rexmit, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        int sndcnt = 0;
@@ -2509,16 +2510,18 @@ static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
                                 (tp->packets_out - tp->sacked_out);
 
        tp->prr_delivered += newly_acked_sacked;
-       if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
+       if (delta < 0) {
                u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
                               tp->prior_cwnd - 1;
                sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
-       } else {
+       } else if ((flag & FLAG_RETRANS_DATA_ACKED) &&
+                  !(flag & FLAG_LOST_RETRANS)) {
                sndcnt = min_t(int, delta,
                               max_t(int, tp->prr_delivered - tp->prr_out,
                                     newly_acked_sacked) + 1);
+       } else {
+               sndcnt = min(delta, newly_acked_sacked);
        }
-
        sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
        tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
 }
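
The rewritten branch structure makes PRR's three regimes explicit: proportional reduction while packets in flight exceed ssthresh (delta < 0), packet conservation plus slow start once retransmissions are ACKed without further losses, and plain conservation (sndcnt bounded by newly delivered) when retransmitted data was itself lost. As a worked case of the proportional branch: with prior_cwnd = 20, snd_ssthresh = 10, prr_delivered = 4 and prr_out = 1, the dividend is 10 * 4 + 20 - 1 = 59, so sndcnt = 59 / 20 - 1 = 1, i.e. roughly one segment sent per two delivered, spreading the halving over one RTT.
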
@@ -2579,7 +2582,7 @@ static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked)
        if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
                tcp_try_keep_open(sk);
        } else {
-               tcp_cwnd_reduction(sk, prior_unsacked, 0);
+               tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
        }
 }
 
@@ -2589,6 +2592,7 @@ static void tcp_mtup_probe_failed(struct sock *sk)
 
        icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
        icsk->icsk_mtup.probe_size = 0;
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
 }
 
 static void tcp_mtup_probe_success(struct sock *sk)
@@ -2608,6 +2612,7 @@ static void tcp_mtup_probe_success(struct sock *sk)
        icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
        icsk->icsk_mtup.probe_size = 0;
        tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
 }
 
 /* Do a simple retransmit without using the backoff mechanisms in
@@ -2676,7 +2681,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
        tp->prior_ssthresh = 0;
        tcp_init_undo(tp);
 
-       if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
+       if (!tcp_in_cwnd_reduction(sk)) {
                if (!ece_ack)
                        tp->prior_ssthresh = tcp_current_ssthresh(sk);
                tcp_init_cwnd_reduction(sk);
@@ -2736,7 +2741,7 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
 
 /* Undo during fast recovery after partial ACK. */
 static bool tcp_try_undo_partial(struct sock *sk, const int acked,
-                                const int prior_unsacked)
+                                const int prior_unsacked, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2752,7 +2757,7 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked,
                 * mark more packets lost or retransmit more.
                 */
                if (tp->retrans_out) {
-                       tcp_cwnd_reduction(sk, prior_unsacked, 0);
+                       tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
                        return true;
                }
 
@@ -2839,7 +2844,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
                        if (tcp_is_reno(tp) && is_dupack)
                                tcp_add_reno_sack(sk);
                } else {
-                       if (tcp_try_undo_partial(sk, acked, prior_unsacked))
+                       if (tcp_try_undo_partial(sk, acked, prior_unsacked, flag))
                                return;
                        /* Partial ACK arrived. Force fast retransmit. */
                        do_lost = tcp_is_reno(tp) ||
@@ -2852,9 +2857,10 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
                break;
        case TCP_CA_Loss:
                tcp_process_loss(sk, flag, is_dupack);
-               if (icsk->icsk_ca_state != TCP_CA_Open)
+               if (icsk->icsk_ca_state != TCP_CA_Open &&
+                   !(flag & FLAG_LOST_RETRANS))
                        return;
-               /* Fall through to processing in Open state. */
+               /* Change state if cwnd is undone or retransmits are lost */
        default:
                if (tcp_is_reno(tp)) {
                        if (flag & FLAG_SND_UNA_ADVANCED)
@@ -2889,7 +2895,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
 
        if (do_lost)
                tcp_update_scoreboard(sk, fast_rexmit);
-       tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit);
+       tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit, flag);
        tcp_xmit_retransmit_queue(sk);
 }
 
@@ -3563,10 +3569,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                                    &sack_state);
        acked -= tp->packets_out;
 
-       /* Advance cwnd if state allows */
-       if (tcp_may_raise_cwnd(sk, flag))
-               tcp_cong_avoid(sk, ack, acked);
-
        if (tcp_ack_is_dubious(sk, flag)) {
                is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
                tcp_fastretrans_alert(sk, acked, prior_unsacked,
@@ -3575,6 +3577,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        if (tp->tlp_high_seq)
                tcp_process_tlp_ack(sk, ack, flag);
 
+       /* Advance cwnd if state allows */
+       if (tcp_may_raise_cwnd(sk, flag))
+               tcp_cong_avoid(sk, ack, acked);
+
        if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) {
                struct dst_entry *dst = __sk_dst_get(sk);
                if (dst)
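
Moving the cwnd raise below tcp_fastretrans_alert() and the TLP processing means tcp_cong_avoid() operates on the post-recovery state those paths may have just established, instead of growing a window that is about to be reduced.
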
@@ -3948,7 +3954,6 @@ void tcp_reset(struct sock *sk)
 static void tcp_fin(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       const struct dst_entry *dst;
 
        inet_csk_schedule_ack(sk);
 
@@ -3960,9 +3965,7 @@ static void tcp_fin(struct sock *sk)
        case TCP_ESTABLISHED:
                /* Move to CLOSE_WAIT */
                tcp_set_state(sk, TCP_CLOSE_WAIT);
-               dst = __sk_dst_get(sk);
-               if (!dst || !dst_metric(dst, RTAX_QUICKACK))
-                       inet_csk(sk)->icsk_ack.pingpong = 1;
+               inet_csk(sk)->icsk_ack.pingpong = 1;
                break;
 
        case TCP_CLOSE_WAIT:
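
tcp_fin() can drop its dst lookup entirely: pingpong mode is set unconditionally again, and the RTAX_QUICKACK override now takes effect where it is actually consulted, in tcp_in_quickack_mode().
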
index d7d4c2b79cf2f516f9e3f62c6fe4415e9bc137a0..d27eb549ced6b4bba76fcd3a4286c8ab0b41478f 100644
@@ -222,7 +222,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        if (err)
                goto failure;
 
-       inet_set_txhash(sk);
+       sk_set_txhash(sk);
 
        rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
                               inet->inet_sport, inet->inet_dport, sk);
@@ -1277,7 +1277,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        newinet->mc_ttl       = ip_hdr(skb)->ttl;
        newinet->rcv_tos      = ip_hdr(skb)->tos;
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
-       inet_set_txhash(newsk);
+       sk_set_txhash(newsk);
        if (inet_opt)
                inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
        newinet->inet_id = newtp->write_seq ^ jiffies;
@@ -1683,8 +1683,7 @@ do_time_wait:
                                                        iph->daddr, th->dest,
                                                        inet_iif(skb));
                if (sk2) {
-                       inet_twsk_deschedule(inet_twsk(sk));
-                       inet_twsk_put(inet_twsk(sk));
+                       inet_twsk_deschedule_put(inet_twsk(sk));
                        sk = sk2;
                        goto process;
                }
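
inet_twsk_deschedule_put() folds the old deschedule-then-put sequence into one helper that removes the timewait socket from its timer and drops the reference in a single step; the same substitution recurs in tcp_minisocks.c and the IPv6 hashing code below.
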
index a51d63a43e33af5fc751e4f0f3369b9394776975..b3d64f61d922e1ec10aa31b4e19ea0fb6c6876be 100644
@@ -461,7 +461,7 @@ void tcp_update_metrics(struct sock *sk)
                                tcp_metric_set(tm, TCP_METRIC_CWND,
                                               tp->snd_cwnd);
                }
-       } else if (tp->snd_cwnd > tp->snd_ssthresh &&
+       } else if (!tcp_in_slow_start(tp) &&
                   icsk->icsk_ca_state == TCP_CA_Open) {
                /* Cong. avoidance phase, cwnd is reliable. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
index 4bc00cb79e603553076adf750712377586f4b2fb..6d8795b066aca708df47de3c9211f36bee5eb1d4 100644
@@ -147,8 +147,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
                if (!th->fin ||
                    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
 kill_with_rst:
-                       inet_twsk_deschedule(tw);
-                       inet_twsk_put(tw);
+                       inet_twsk_deschedule_put(tw);
                        return TCP_TW_RST;
                }
 
@@ -198,8 +197,7 @@ kill_with_rst:
                         */
                        if (sysctl_tcp_rfc1337 == 0) {
 kill:
-                               inet_twsk_deschedule(tw);
-                               inet_twsk_put(tw);
+                               inet_twsk_deschedule_put(tw);
                                return TCP_TW_SUCCESS;
                        }
                }
index b1c218df2c855bc56594ffdd86d75ef5e146731a..7d1efa762b75b04e982c14da36c12595b38dc880 100644
@@ -163,7 +163,6 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        const u32 now = tcp_time_stamp;
-       const struct dst_entry *dst = __sk_dst_get(sk);
 
        if (sysctl_tcp_slow_start_after_idle &&
            (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
@@ -174,9 +173,8 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
        /* If it is a reply for ato after last received
         * packet, enter pingpong mode.
         */
-       if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato &&
-           (!dst || !dst_metric(dst, RTAX_QUICKACK)))
-                       icsk->icsk_ack.pingpong = 1;
+       if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
+               icsk->icsk_ack.pingpong = 1;
 }
 
 /* Account for an ACK we sent. */
@@ -1776,7 +1774,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
                goto send_now;
 
-       if (!((1 << icsk->icsk_ca_state) & (TCPF_CA_Open | TCPF_CA_CWR)))
+       if (icsk->icsk_ca_state >= TCP_CA_Recovery)
                goto send_now;
 
        /* Avoid bursty behavior by allowing defer
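
The ordered comparison reads cleaner than the state bitmask, and it also shifts behavior slightly: TCP_CA_Disorder sits below TCP_CA_Recovery, so TSO deferral is now permitted in the Disorder state, which the old (TCPF_CA_Open | TCPF_CA_CWR) test excluded.
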
index 333bcb2415ffca51e06f3042ae3d94b8e21c0725..bf5ea9e9bbc1ed3c07c03f9db69b9848cf83ec8e 100644
@@ -22,7 +22,7 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (!tcp_is_cwnd_limited(sk))
                return;
 
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
+       if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
        else
                tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
index 5b752f58a90063e7468b11f2853c7c006b679e60..7149ebc820c7d87afef856ce641ac63678abafa3 100644
@@ -649,4 +649,3 @@ void tcp_init_xmit_timers(struct sock *sk)
        inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
                                  &tcp_keepalive_timer);
 }
-EXPORT_SYMBOL(tcp_init_xmit_timers);
index a6cea1d5e20d47f06eab95f3344a3e3b7c44da89..13951c4087d407b72cb5bc2ee75822203244e3f3 100644
@@ -225,7 +225,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                         */
                        diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
 
-                       if (diff > gamma && tp->snd_cwnd <= tp->snd_ssthresh) {
+                       if (diff > gamma && tcp_in_slow_start(tp)) {
                                /* Going too fast. Time to slow down
                                 * and switch to congestion avoidance.
                                 */
@@ -240,7 +240,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                                tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
                                tp->snd_ssthresh = tcp_vegas_ssthresh(tp);
 
-                       } else if (tp->snd_cwnd <= tp->snd_ssthresh) {
+                       } else if (tcp_in_slow_start(tp)) {
                                /* Slow start.  */
                                tcp_slow_start(tp, acked);
                        } else {
@@ -281,7 +281,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                vegas->minRTT = 0x7fffffff;
        }
        /* Use normal slow start */
-       else if (tp->snd_cwnd <= tp->snd_ssthresh)
+       else if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
 }
 
index 112151eeee45bff0c37ac92d78d165ba92bd4d0a..0d094b995cd96f8c5150daf586cdde0f495843f5 100644
@@ -150,7 +150,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 
                veno->diff = (tp->snd_cwnd << V_PARAM_SHIFT) - target_cwnd;
 
-               if (tp->snd_cwnd <= tp->snd_ssthresh) {
+               if (tcp_in_slow_start(tp)) {
                        /* Slow start.  */
                        tcp_slow_start(tp, acked);
                } else {
index 438a73aa777cf560f38a87801b03b8ce20a315b1..643f61339e7b4fc9d4dcba75c4bb772c99d39292 100644
@@ -5,16 +5,15 @@
 #   IPv6 as module will cause a CRASH if you try to unload it
 menuconfig IPV6
        tristate "The IPv6 protocol"
-       default m
+       default y
        ---help---
-         This is complemental support for the IP version 6.
-         You will still be able to do traditional IPv4 networking as well.
+         Support for IP version 6 (IPv6).
 
          For general information about IPv6, see
          <https://en.wikipedia.org/wiki/IPv6>.
-         For Linux IPv6 development information, see <http://www.linux-ipv6.org>.
-         For specific information about IPv6 under Linux, read the HOWTO at
-         <http://www.bieringer.de/linux/IPv6/>.
+         For specific information about IPv6 under Linux, see
+         Documentation/networking/ipv6.txt and read the HOWTO at
+         <http://www.tldp.org/HOWTO/Linux+IPv6-HOWTO/>
 
          To compile this protocol support as a module, choose M here: the 
          module will be called ipv6.
index 21c2c818df3b8379226555268ef526c08553d00d..53e3a9d756b0d804e873c80a820383b756a0624b 100644
@@ -195,6 +195,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
        .max_addresses          = IPV6_MAX_ADDRESSES,
        .accept_ra_defrtr       = 1,
        .accept_ra_from_local   = 0,
+       .accept_ra_min_hop_limit= 1,
        .accept_ra_pinfo        = 1,
 #ifdef CONFIG_IPV6_ROUTER_PREF
        .accept_ra_rtr_pref     = 1,
@@ -211,7 +212,8 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
        .accept_ra_mtu          = 1,
        .stable_secret          = {
                .initialized = false,
-       }
+       },
+       .use_oif_addrs_only     = 0,
 };
 
 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -236,6 +238,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
        .max_addresses          = IPV6_MAX_ADDRESSES,
        .accept_ra_defrtr       = 1,
        .accept_ra_from_local   = 0,
+       .accept_ra_min_hop_limit= 1,
        .accept_ra_pinfo        = 1,
 #ifdef CONFIG_IPV6_ROUTER_PREF
        .accept_ra_rtr_pref     = 1,
@@ -253,6 +256,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
        .stable_secret          = {
                .initialized = false,
        },
+       .use_oif_addrs_only     = 0,
 };
 
 /* Check if a valid qdisc is available */
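
accept_ra_min_hop_limit (default 1, i.e. no behavior change) sets the smallest Cur Hop Limit a router advertisement may carry and still update the host's hop limit; RAs below the threshold leave the current value alone.
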
@@ -1358,15 +1362,96 @@ out:
        return ret;
 }
 
+static int __ipv6_dev_get_saddr(struct net *net,
+                               struct ipv6_saddr_dst *dst,
+                               struct inet6_dev *idev,
+                               struct ipv6_saddr_score *scores,
+                               int hiscore_idx)
+{
+       struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];
+
+       read_lock_bh(&idev->lock);
+       list_for_each_entry(score->ifa, &idev->addr_list, if_list) {
+               int i;
+
+               /*
+                * - Tentative Address (RFC2462 section 5.4)
+                *  - A tentative address is not considered
+                *    "assigned to an interface" in the traditional
+                *    sense, unless it is also flagged as optimistic.
+                * - Candidate Source Address (section 4)
+                *  - In any case, anycast addresses, multicast
+                *    addresses, and the unspecified address MUST
+                *    NOT be included in a candidate set.
+                */
+               if ((score->ifa->flags & IFA_F_TENTATIVE) &&
+                   (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
+                       continue;
+
+               score->addr_type = __ipv6_addr_type(&score->ifa->addr);
+
+               if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
+                            score->addr_type & IPV6_ADDR_MULTICAST)) {
+                       net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
+                                           idev->dev->name);
+                       continue;
+               }
+
+               score->rule = -1;
+               bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
+
+               for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
+                       int minihiscore, miniscore;
+
+                       minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
+                       miniscore = ipv6_get_saddr_eval(net, score, dst, i);
+
+                       if (minihiscore > miniscore) {
+                               if (i == IPV6_SADDR_RULE_SCOPE &&
+                                   score->scopedist > 0) {
+                                       /*
+                                        * special case:
+                                        * each remaining entry
+                                        * has too small (not enough)
+                                        * scope, because ifa entries
+                                        * are sorted by their scope
+                                        * values.
+                                        */
+                                       goto out;
+                               }
+                               break;
+                       } else if (minihiscore < miniscore) {
+                               if (hiscore->ifa)
+                                       in6_ifa_put(hiscore->ifa);
+
+                               in6_ifa_hold(score->ifa);
+
+                               swap(hiscore, score);
+                               hiscore_idx = 1 - hiscore_idx;
+
+                               /* restore our iterator */
+                               score->ifa = hiscore->ifa;
+
+                               break;
+                       }
+               }
+       }
+out:
+       read_unlock_bh(&idev->lock);
+       return hiscore_idx;
+}
+
 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
                       const struct in6_addr *daddr, unsigned int prefs,
                       struct in6_addr *saddr)
 {
-       struct ipv6_saddr_score scores[2],
-                               *score = &scores[0], *hiscore = &scores[1];
+       struct ipv6_saddr_score scores[2], *hiscore;
        struct ipv6_saddr_dst dst;
+       struct inet6_dev *idev;
        struct net_device *dev;
        int dst_type;
+       bool use_oif_addr = false;
+       int hiscore_idx = 0;
 
        dst_type = __ipv6_addr_type(daddr);
        dst.addr = daddr;
@@ -1375,105 +1460,50 @@ int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
        dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
        dst.prefs = prefs;
 
-       hiscore->rule = -1;
-       hiscore->ifa = NULL;
+       scores[hiscore_idx].rule = -1;
+       scores[hiscore_idx].ifa = NULL;
 
        rcu_read_lock();
 
-       for_each_netdev_rcu(net, dev) {
-               struct inet6_dev *idev;
-
-               /* Candidate Source Address (section 4)
-                *  - multicast and link-local destination address,
-                *    the set of candidate source address MUST only
-                *    include addresses assigned to interfaces
-                *    belonging to the same link as the outgoing
-                *    interface.
-                * (- For site-local destination addresses, the
-                *    set of candidate source addresses MUST only
-                *    include addresses assigned to interfaces
-                *    belonging to the same site as the outgoing
-                *    interface.)
-                */
-               if (((dst_type & IPV6_ADDR_MULTICAST) ||
-                    dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL) &&
-                   dst.ifindex && dev->ifindex != dst.ifindex)
-                       continue;
-
-               idev = __in6_dev_get(dev);
-               if (!idev)
-                       continue;
-
-               read_lock_bh(&idev->lock);
-               list_for_each_entry(score->ifa, &idev->addr_list, if_list) {
-                       int i;
-
-                       /*
-                        * - Tentative Address (RFC2462 section 5.4)
-                        *  - A tentative address is not considered
-                        *    "assigned to an interface" in the traditional
-                        *    sense, unless it is also flagged as optimistic.
-                        * - Candidate Source Address (section 4)
-                        *  - In any case, anycast addresses, multicast
-                        *    addresses, and the unspecified address MUST
-                        *    NOT be included in a candidate set.
-                        */
-                       if ((score->ifa->flags & IFA_F_TENTATIVE) &&
-                           (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
-                               continue;
-
-                       score->addr_type = __ipv6_addr_type(&score->ifa->addr);
+       /* Candidate Source Address (section 4)
+        *  - multicast and link-local destination address,
+        *    the set of candidate source address MUST only
+        *    include addresses assigned to interfaces
+        *    belonging to the same link as the outgoing
+        *    interface.
+        * (- For site-local destination addresses, the
+        *    set of candidate source addresses MUST only
+        *    include addresses assigned to interfaces
+        *    belonging to the same site as the outgoing
+        *    interface.)
+        *  - "It is RECOMMENDED that the candidate source addresses
+        *    be the set of unicast addresses assigned to the
+        *    interface that will be used to send to the destination
+        *    (the 'outgoing' interface)." (RFC 6724)
+        */
+       if (dst_dev) {
+               idev = __in6_dev_get(dst_dev);
+               if ((dst_type & IPV6_ADDR_MULTICAST) ||
+                   dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
+                   (idev && idev->cnf.use_oif_addrs_only)) {
+                       use_oif_addr = true;
+               }
+       }
 
-                       if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
-                                    score->addr_type & IPV6_ADDR_MULTICAST)) {
-                               net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
-                                                   dev->name);
+       if (use_oif_addr) {
+               if (idev)
+                       hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
+       } else {
+               for_each_netdev_rcu(net, dev) {
+                       idev = __in6_dev_get(dev);
+                       if (!idev)
                                continue;
-                       }
-
-                       score->rule = -1;
-                       bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
-
-                       for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
-                               int minihiscore, miniscore;
-
-                               minihiscore = ipv6_get_saddr_eval(net, hiscore, &dst, i);
-                               miniscore = ipv6_get_saddr_eval(net, score, &dst, i);
-
-                               if (minihiscore > miniscore) {
-                                       if (i == IPV6_SADDR_RULE_SCOPE &&
-                                           score->scopedist > 0) {
-                                               /*
-                                                * special case:
-                                                * each remaining entry
-                                                * has too small (not enough)
-                                                * scope, because ifa entries
-                                                * are sorted by their scope
-                                                * values.
-                                                */
-                                               goto try_nextdev;
-                                       }
-                                       break;
-                               } else if (minihiscore < miniscore) {
-                                       if (hiscore->ifa)
-                                               in6_ifa_put(hiscore->ifa);
-
-                                       in6_ifa_hold(score->ifa);
-
-                                       swap(hiscore, score);
-
-                                       /* restore our iterator */
-                                       score->ifa = hiscore->ifa;
-
-                                       break;
-                               }
-                       }
+                       hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
                }
-try_nextdev:
-               read_unlock_bh(&idev->lock);
        }
        rcu_read_unlock();
 
+       hiscore = &scores[hiscore_idx];
        if (!hiscore->ifa)
                return -EADDRNOTAVAIL;
 
@@ -4560,6 +4590,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
        array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
        array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
        array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
+       array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
        array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
 #ifdef CONFIG_IPV6_ROUTER_PREF
        array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
@@ -4586,6 +4617,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
        array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
        array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
        /* we omit DEVCONF_STABLE_SECRET for now */
+       array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
 }
 
 static inline size_t inet6_ifla6_size(void)
@@ -5455,6 +5487,13 @@ static struct addrconf_sysctl_table
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec,
                },
+               {
+                       .procname       = "accept_ra_min_hop_limit",
+                       .data           = &ipv6_devconf.accept_ra_min_hop_limit,
+                       .maxlen         = sizeof(int),
+                       .mode           = 0644,
+                       .proc_handler   = proc_dointvec,
+               },
                {
                        .procname       = "accept_ra_pinfo",
                        .data           = &ipv6_devconf.accept_ra_pinfo,
@@ -5584,6 +5623,14 @@ static struct addrconf_sysctl_table
                        .mode           = 0600,
                        .proc_handler   = addrconf_sysctl_stable_secret,
                },
+               {
+                       .procname       = "use_oif_addrs_only",
+                       .data           = &ipv6_devconf.use_oif_addrs_only,
+                       .maxlen         = sizeof(int),
+                       .mode           = 0644,
+                       .proc_handler   = proc_dointvec,
+
+               },
                {
                        /* sentinel */
                }
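
The refactor hoists the per-interface scan of ipv6_dev_get_saddr() into __ipv6_dev_get_saddr() so the RFC 6724 recommendation quoted in the new comment can be applied literally: for multicast or link-local destinations, and whenever the new use_oif_addrs_only knob is set (per interface, e.g. net.ipv6.conf.eth0.use_oif_addrs_only=1, with eth0 as a placeholder), only the outgoing interface's addresses enter the candidate set instead of every address on the host. The hiscore_idx bookkeeping replaces the old pointer swap so the winning entry survives across per-device calls.
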
index ca09bf49ac6806b399dba51399f84e47590cb9ed..bfa941fc1165002903b5a0364e5584b075469e2e 100644
@@ -107,7 +107,16 @@ int inet6addr_notifier_call_chain(unsigned long val, void *v)
 }
 EXPORT_SYMBOL(inet6addr_notifier_call_chain);
 
-const struct ipv6_stub *ipv6_stub __read_mostly;
+static int eafnosupport_ipv6_dst_lookup(struct net *net, struct sock *u1,
+                                       struct dst_entry **u2,
+                                       struct flowi6 *u3)
+{
+       return -EAFNOSUPPORT;
+}
+
+const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) {
+       .ipv6_dst_lookup = eafnosupport_ipv6_dst_lookup,
+};
 EXPORT_SYMBOL_GPL(ipv6_stub);
 
 /* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
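
Pointing ipv6_stub at a static instance whose ipv6_dst_lookup() returns -EAFNOSUPPORT spares callers (tunnel drivers and the like) the NULL check: with IPv6 absent, the lookup simply fails with a sensible errno.
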
index 7de52b65173fa6a1b344b13e67106ad39591ed06..44bb66bde0e2d97308c3c68a8d6b225ce04d08a8 100644
@@ -197,6 +197,7 @@ lookup_protocol:
        np->mcast_hops  = IPV6_DEFAULT_MCASTHOPS;
        np->mc_loop     = 1;
        np->pmtudisc    = IPV6_PMTUDISC_WANT;
+       np->autoflowlabel = ip6_default_np_autolabel(sock_net(sk));
        sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
 
        /* Init the ipv4 part of the socket since we can have sockets
@@ -342,7 +343,8 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                         */
                        v4addr = LOOPBACK4_IPV6;
                        if (!(addr_type & IPV6_ADDR_MULTICAST)) {
-                               if (!(inet->freebind || inet->transparent) &&
+                               if (!net->ipv6.sysctl.ip_nonlocal_bind &&
+                                   !(inet->freebind || inet->transparent) &&
                                    !ipv6_chk_addr(net, &addr->sin6_addr,
                                                   dev, 0)) {
                                        err = -EADDRNOTAVAIL;
@@ -679,8 +681,8 @@ bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
        const struct ipv6_pinfo *np = inet6_sk(sk);
 
        if (np->rxopt.all) {
-               if ((opt->hop && (np->rxopt.bits.hopopts ||
-                                 np->rxopt.bits.ohopopts)) ||
+               if (((opt->flags & IP6SKB_HOPBYHOP) &&
+                    (np->rxopt.bits.hopopts || np->rxopt.bits.ohopopts)) ||
                    (ip6_flowinfo((struct ipv6hdr *) skb_network_header(skb)) &&
                     np->rxopt.bits.rxflow) ||
                    (opt->srcrt && (np->rxopt.bits.srcrt ||
@@ -766,10 +768,10 @@ static int __net_init inet6_net_init(struct net *net)
        net->ipv6.sysctl.bindv6only = 0;
        net->ipv6.sysctl.icmpv6_time = 1*HZ;
        net->ipv6.sysctl.flowlabel_consistency = 1;
-       net->ipv6.sysctl.auto_flowlabels = 0;
+       net->ipv6.sysctl.auto_flowlabels = IP6_DEFAULT_AUTO_FLOW_LABELS;
        net->ipv6.sysctl.idgen_retries = 3;
        net->ipv6.sysctl.idgen_delay = 1 * HZ;
-       net->ipv6.sysctl.flowlabel_state_ranges = 1;
+       net->ipv6.sysctl.flowlabel_state_ranges = 0;
        atomic_set(&net->ipv6.fib6_sernum, 1);
 
        err = ipv6_init_mibs(net);
index 62d908e64eeb53740d53ddfd57e26867c4e7e4d3..9aadd57808a515dda6edbf4b784aae2179604628 100644
@@ -40,7 +40,7 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
        return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
 }
 
-int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
        struct sockaddr_in6     *usin = (struct sockaddr_in6 *) uaddr;
        struct inet_sock        *inet = inet_sk(sk);
@@ -56,7 +56,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        if (usin->sin6_family == AF_INET) {
                if (__ipv6_only_sock(sk))
                        return -EAFNOSUPPORT;
-               err = ip4_datagram_connect(sk, uaddr, addr_len);
+               err = __ip4_datagram_connect(sk, uaddr, addr_len);
                goto ipv4_connected;
        }
 
@@ -98,9 +98,9 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                sin.sin_addr.s_addr = daddr->s6_addr32[3];
                sin.sin_port = usin->sin6_port;
 
-               err = ip4_datagram_connect(sk,
-                                          (struct sockaddr *) &sin,
-                                          sizeof(sin));
+               err = __ip4_datagram_connect(sk,
+                                            (struct sockaddr *) &sin,
+                                            sizeof(sin));
 
 ipv4_connected:
                if (err)
@@ -199,11 +199,21 @@ ipv4_connected:
                      NULL);
 
        sk->sk_state = TCP_ESTABLISHED;
-       ip6_set_txhash(sk);
+       sk_set_txhash(sk);
 out:
        fl6_sock_release(flowlabel);
        return err;
 }
+
+int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+       int res;
+
+       lock_sock(sk);
+       res = __ip6_datagram_connect(sk, uaddr, addr_len);
+       release_sock(sk);
+       return res;
+}
 EXPORT_SYMBOL_GPL(ip6_datagram_connect);
 
 int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr,
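
Splitting the body into __ip6_datagram_connect() keeps the mapped-IPv4 path on the unlocked core (__ip4_datagram_connect()), while the exported entry point now takes the socket lock once, so the address and state updates performed during connect cannot race concurrent socket calls.
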
@@ -558,8 +568,8 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
        }
 
        /* HbH is allowed only once */
-       if (np->rxopt.bits.hopopts && opt->hop) {
-               u8 *ptr = nh + opt->hop;
+       if (np->rxopt.bits.hopopts && (opt->flags & IP6SKB_HOPBYHOP)) {
+               u8 *ptr = nh + sizeof(struct ipv6hdr);
                put_cmsg(msg, SOL_IPV6, IPV6_HOPOPTS, (ptr[1]+1)<<3, ptr);
        }
 
@@ -620,8 +630,8 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
                int hlim = ipv6_hdr(skb)->hop_limit;
                put_cmsg(msg, SOL_IPV6, IPV6_2292HOPLIMIT, sizeof(hlim), &hlim);
        }
-       if (np->rxopt.bits.ohopopts && opt->hop) {
-               u8 *ptr = nh + opt->hop;
+       if (np->rxopt.bits.ohopopts && (opt->flags & IP6SKB_HOPBYHOP)) {
+               u8 *ptr = nh + sizeof(struct ipv6hdr);
                put_cmsg(msg, SOL_IPV6, IPV6_2292HOPOPTS, (ptr[1]+1)<<3, ptr);
        }
        if (np->rxopt.bits.odstopts && opt->dst0) {
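
Since a hop-by-hop options header, when present, always directly follows the IPv6 header, the per-packet offset in opt->hop was redundant; the IP6SKB_HOPBYHOP flag records its presence and the receive paths recompute the pointer as nh + sizeof(struct ipv6hdr).
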
index a7bbbe45570b287eb05b9f13d0a73a830b767dd2..ce203b0402bea3b16deb34b4835cd2e89e94f899 100644
@@ -632,7 +632,7 @@ int ipv6_parse_hopopts(struct sk_buff *skb)
                return -1;
        }
 
-       opt->hop = sizeof(struct ipv6hdr);
+       opt->flags |= IP6SKB_HOPBYHOP;
        if (ip6_parse_tlv(tlvprochopopt_lst, skb)) {
                skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
                opt = IP6CB(skb);
index 713d7434c9112432f800f377925ca68534094c34..6c2b2132c8d328e4d947c3b0b8216ea40f582f90 100644
@@ -329,7 +329,7 @@ static struct dst_entry *icmpv6_route_lookup(struct net *net,
        struct flowi6 fl2;
        int err;
 
-       err = ip6_dst_lookup(sk, &dst, fl6);
+       err = ip6_dst_lookup(net, sk, &dst, fl6);
        if (err)
                return ERR_PTR(err);
 
@@ -361,7 +361,7 @@ static struct dst_entry *icmpv6_route_lookup(struct net *net,
        if (err)
                goto relookup_failed;
 
-       err = ip6_dst_lookup(sk, &dst2, &fl2);
+       err = ip6_dst_lookup(net, sk, &dst2, &fl2);
        if (err)
                goto relookup_failed;
 
@@ -591,7 +591,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        else if (!fl6.flowi6_oif)
                fl6.flowi6_oif = np->ucast_oif;
 
-       err = ip6_dst_lookup(sk, &dst, &fl6);
+       err = ip6_dst_lookup(net, sk, &dst, &fl6);
        if (err)
                goto out;
        dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
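
ip6_dst_lookup() and its tail now take struct net explicitly instead of deriving it via sock_net(sk), which removes the requirement for a full socket at the call sites; the icmpv6 callers above just pass the net they already hold.
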
index b4fd96de97e61627003eff220e10bdd05a899e28..6ac8dad0138a6b41395f306bffc2d9b47d8d91bc 100644
@@ -207,7 +207,6 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
        struct sock *sk2;
        const struct hlist_nulls_node *node;
        struct inet_timewait_sock *tw = NULL;
-       int twrefcnt = 0;
 
        spin_lock(lock);
 
@@ -234,21 +233,17 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
        WARN_ON(!sk_unhashed(sk));
        __sk_nulls_add_node_rcu(sk, &head->chain);
        if (tw) {
-               twrefcnt = inet_twsk_unhash(tw);
+               sk_nulls_del_node_init_rcu((struct sock *)tw);
                NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
        }
        spin_unlock(lock);
-       if (twrefcnt)
-               inet_twsk_put(tw);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 
        if (twp) {
                *twp = tw;
        } else if (tw) {
                /* Silly. Should hash-dance instead... */
-               inet_twsk_deschedule(tw);
-
-               inet_twsk_put(tw);
+               inet_twsk_deschedule_put(tw);
        }
        return 0;
 
index 55d19861ab20f4a91b6b289be7ca3b0250df4531..5693b5eb84820fceb7feb2f87345cd38b2613c6e 100644
@@ -32,6 +32,7 @@
 #include <net/ipv6.h>
 #include <net/ndisc.h>
 #include <net/addrconf.h>
+#include <net/lwtunnel.h>
 
 #include <net/ip6_fib.h>
 #include <net/ip6_route.h>
@@ -177,6 +178,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
 static void rt6_release(struct rt6_info *rt)
 {
        if (atomic_dec_and_test(&rt->rt6i_ref)) {
+               lwtstate_put(rt->rt6i_lwtstate);
                rt6_free_pcpu(rt);
                dst_free(&rt->dst);
        }
index a38d3ac0f18f6e631e3a17904bf617f7a0dfe28a..34f121812a1484e0ff23ea337ee39f35424a9628 100644
@@ -728,7 +728,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
         */
        ipv6h = ipv6_hdr(skb);
        ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
-                    ip6_make_flowlabel(net, skb, fl6->flowlabel, false));
+                    ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
        ipv6h->hop_limit = tunnel->parms.hop_limit;
        ipv6h->nexthdr = proto;
        ipv6h->saddr = fl6->saddr;
@@ -1182,7 +1182,8 @@ static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
 
        ip6_flow_hdr(ipv6h, 0,
                     ip6_make_flowlabel(dev_net(dev), skb,
-                                       t->fl.u.ip6.flowlabel, false));
+                                       t->fl.u.ip6.flowlabel, true,
+                                       &t->fl.u.ip6));
        ipv6h->hop_limit = t->parms.hop_limit;
        ipv6h->nexthdr = NEXTHDR_GRE;
        ipv6h->saddr = t->parms.laddr;
index 57990c929cd8156ebac52c648deb50fd3f74ab82..adba03ac7ce9671f6dba419d163672b9520743bd 100644 (file)
@@ -45,6 +45,7 @@
 #include <net/addrconf.h>
 #include <net/xfrm.h>
 #include <net/inet_ecn.h>
+#include <net/dst_metadata.h>
 
 int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb)
 {
@@ -55,7 +56,7 @@ int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb)
                if (ipprot && ipprot->early_demux)
                        ipprot->early_demux(skb);
        }
-       if (!skb_dst(skb))
+       if (!skb_valid_dst(skb))
                ip6_route_input(skb);
 
        return dst_input(skb);
@@ -98,7 +99,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
         * arrived via the sending interface (ethX), because of the
         * nature of scoping architecture. --yoshfuji
         */
-       IP6CB(skb)->iif = skb_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;
+       IP6CB(skb)->iif = skb_valid_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;
 
        if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
                goto err;
index e893cd18612fcdc9e8577f0e060ffd32b5659eea..08b62047c67f311ca808533cb7a83b5caab0cfc8 100644 (file)
@@ -292,8 +292,6 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
 static const struct net_offload sit_offload = {
        .callbacks = {
                .gso_segment    = ipv6_gso_segment,
-               .gro_receive    = ipv6_gro_receive,
-               .gro_complete   = ipv6_gro_complete,
        },
 };
 
index d5f7716662dbc361e93ebc443e72ee5fb7343b10..26ea4793074004d0af1026bb378860b53baa0ad2 100644 (file)
@@ -207,7 +207,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
                hlimit = ip6_dst_hoplimit(dst);
 
        ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
-                                                    np->autoflowlabel));
+                                                    np->autoflowlabel, fl6));
 
        hdr->payload_len = htons(seg_len);
        hdr->nexthdr = proto;
@@ -881,10 +881,9 @@ out:
        return dst;
 }
 
-static int ip6_dst_lookup_tail(struct sock *sk,
+static int ip6_dst_lookup_tail(struct net *net, struct sock *sk,
                               struct dst_entry **dst, struct flowi6 *fl6)
 {
-       struct net *net = sock_net(sk);
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
        struct neighbour *n;
        struct rt6_info *rt;
@@ -994,10 +993,11 @@ out_err_release:
  *
  *     It returns zero on success, or a standard errno code on error.
  */
-int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
+int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
+                  struct flowi6 *fl6)
 {
        *dst = NULL;
-       return ip6_dst_lookup_tail(sk, dst, fl6);
+       return ip6_dst_lookup_tail(net, sk, dst, fl6);
 }
 EXPORT_SYMBOL_GPL(ip6_dst_lookup);
 
@@ -1018,11 +1018,13 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
        struct dst_entry *dst = NULL;
        int err;
 
-       err = ip6_dst_lookup_tail(sk, &dst, fl6);
+       err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
        if (err)
                return ERR_PTR(err);
        if (final_dst)
                fl6->daddr = *final_dst;
+       if (!fl6->flowi6_oif)
+               fl6->flowi6_oif = dst->dev->ifindex;
 
        return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
 }
@@ -1050,7 +1052,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
 
        dst = ip6_sk_dst_check(sk, dst, fl6);
 
-       err = ip6_dst_lookup_tail(sk, &dst, fl6);
+       err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
        if (err)
                return ERR_PTR(err);
        if (final_dst)
@@ -1647,7 +1649,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
 
        ip6_flow_hdr(hdr, v6_cork->tclass,
                     ip6_make_flowlabel(net, skb, fl6->flowlabel,
-                                       np->autoflowlabel));
+                                       np->autoflowlabel, fl6));
        hdr->hop_limit = v6_cork->hop_limit;
        hdr->nexthdr = proto;
        hdr->saddr = fl6->saddr;
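Note the behavioural addition in ip6_dst_lookup_flow(): when the caller left flowi6_oif unset, it is now backfilled from the device the route lookup selected, so the subsequent xfrm lookup sees a fully specified flow. A minimal sketch of the observable effect (illustrative values, error handling trimmed):

	struct flowi6 fl6 = {
		.daddr = in6addr_loopback,	/* assumption: any routable address */
		/* .flowi6_oif left at 0: no device pinned by the caller */
	};
	struct dst_entry *dst = ip6_dst_lookup_flow(sk, &fl6, NULL);

	if (!IS_ERR(dst))
		/* fl6.flowi6_oif now equals dst->dev->ifindex */
		dst_release(dst);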
index 2e67b660118bf7eeaf2f08033aa759190dea0c3a..b0ab420612bcc30efd5e58a30bdfb5e730e88b0e 100644 (file)
@@ -1095,7 +1095,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
        skb_reset_network_header(skb);
        ipv6h = ipv6_hdr(skb);
        ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
-                    ip6_make_flowlabel(net, skb, fl6->flowlabel, false));
+                    ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
        ipv6h->hop_limit = t->parms.hop_limit;
        ipv6h->nexthdr = proto;
        ipv6h->saddr = fl6->saddr;
index 0a05b35a90fc946dc0de3ae76e4a682ca4438932..b3054611f88a5f69503e1a44ced1592579dfc4fd 100644 (file)
@@ -1225,18 +1225,16 @@ static void ndisc_router_discovery(struct sk_buff *skb)
 
        if (rt)
                rt6_set_expires(rt, jiffies + (HZ * lifetime));
-       if (ra_msg->icmph.icmp6_hop_limit) {
-               /* Only set hop_limit on the interface if it is higher than
-                * the current hop_limit.
-                */
-               if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
+       if (in6_dev->cnf.accept_ra_min_hop_limit < 256 &&
+           ra_msg->icmph.icmp6_hop_limit) {
+               if (in6_dev->cnf.accept_ra_min_hop_limit <= ra_msg->icmph.icmp6_hop_limit) {
                        in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+                       if (rt)
+                               dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
+                                              ra_msg->icmph.icmp6_hop_limit);
                } else {
-                       ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
+                       ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than minimum\n");
                }
-               if (rt)
-                       dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
-                                      ra_msg->icmph.icmp6_hop_limit);
        }
 
 skip_defrtr:
@@ -1650,6 +1648,7 @@ int ndisc_rcv(struct sk_buff *skb)
 static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct netdev_notifier_change_info *change_info;
        struct net *net = dev_net(dev);
        struct inet6_dev *idev;
 
@@ -1664,6 +1663,11 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
                        ndisc_send_unsol_na(dev);
                in6_dev_put(idev);
                break;
+       case NETDEV_CHANGE:
+               change_info = ptr;
+               if (change_info->flags_changed & IFF_NOARP)
+                       neigh_changeaddr(&nd_tbl, dev);
+               break;
        case NETDEV_DOWN:
                neigh_ifdown(&nd_tbl, dev);
                fib6_run_gc(0, net, false);
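The hop-limit logic above replaces the old "only ever raise" heuristic with the new per-interface accept_ra_min_hop_limit knob. A sketch of the acceptance test, factored into a hypothetical helper for readability:

	/* a minimal sketch; ra_hop_limit_acceptable() is not in the tree */
	static bool ra_hop_limit_acceptable(const struct inet6_dev *in6_dev, u8 adv)
	{
		/* adv == 0 means the RA carries no hop-limit suggestion, and
		 * a minimum of 256 or more can never match an 8-bit value,
		 * which gives administrators an "ignore RAs" setting */
		return adv &&
		       in6_dev->cnf.accept_ra_min_hop_limit < 256 &&
		       in6_dev->cnf.accept_ra_min_hop_limit <= adv;
	}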
index 3c35ced39b42b48d3b81a2bfb8e448bd2dc02e17..4e21f80228be2047bf4344cdf958777b42461bfb 100644 (file)
@@ -305,7 +305,7 @@ static void trace_packet(const struct sk_buff *skb,
 }
 #endif
 
-static inline __pure struct ip6t_entry *
+static inline struct ip6t_entry *
 ip6t_next_entry(const struct ip6t_entry *entry)
 {
        return (void *)entry + entry->next_offset;
@@ -324,12 +324,13 @@ ip6t_do_table(struct sk_buff *skb,
        const char *indev, *outdev;
        const void *table_base;
        struct ip6t_entry *e, **jumpstack;
-       unsigned int *stackptr, origptr, cpu;
+       unsigned int stackidx, cpu;
        const struct xt_table_info *private;
        struct xt_action_param acpar;
        unsigned int addend;
 
        /* Initialization */
+       stackidx = 0;
        indev = state->in ? state->in->name : nulldevname;
        outdev = state->out ? state->out->name : nulldevname;
        /* We handle fragments by dealing with the first fragment as
@@ -357,8 +358,16 @@ ip6t_do_table(struct sk_buff *skb,
        cpu        = smp_processor_id();
        table_base = private->entries;
        jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
-       stackptr   = per_cpu_ptr(private->stackptr, cpu);
-       origptr    = *stackptr;
+
+       /* Switch to the alternate jumpstack if we're being invoked via TEE.
+        * TEE issues an XT_CONTINUE verdict on the original skb, so we must
+        * not clobber its jumpstack.
+        *
+        * For recursion via REJECT or SYNPROXY the stack will be clobbered,
+        * but that is no problem since those targets issue an absolute
+        * verdict.
+        */
+       if (static_key_false(&xt_tee_enabled))
+               jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
 
        e = get_entry(table_base, private->hook_entry[hook]);
 
@@ -406,20 +415,16 @@ ip6t_do_table(struct sk_buff *skb,
                                        verdict = (unsigned int)(-v) - 1;
                                        break;
                                }
-                               if (*stackptr <= origptr)
+                               if (stackidx == 0)
                                        e = get_entry(table_base,
                                            private->underflow[hook]);
                                else
-                                       e = ip6t_next_entry(jumpstack[--*stackptr]);
+                                       e = ip6t_next_entry(jumpstack[--stackidx]);
                                continue;
                        }
                        if (table_base + v != ip6t_next_entry(e) &&
                            !(e->ipv6.flags & IP6T_F_GOTO)) {
-                               if (*stackptr >= private->stacksize) {
-                                       verdict = NF_DROP;
-                                       break;
-                               }
-                               jumpstack[(*stackptr)++] = e;
+                               jumpstack[stackidx++] = e;
                        }
 
                        e = get_entry(table_base, v);
@@ -437,8 +442,6 @@ ip6t_do_table(struct sk_buff *skb,
                        break;
        } while (!acpar.hotdrop);
 
-       *stackptr = origptr;
-
        xt_write_recseq_end(addend);
        local_bh_enable();
 
@@ -452,11 +455,15 @@ ip6t_do_table(struct sk_buff *skb,
 }
 
 /* Figures out from what hook each rule can be called: returns 0 if
-   there are loops.  Puts hook bitmask in comefrom. */
+ * there are loops.  Puts hook bitmask in comefrom.
+ *
+ * Keeps track of largest call depth seen and stores it in newinfo->stacksize.
+ */
 static int
-mark_source_chains(const struct xt_table_info *newinfo,
+mark_source_chains(struct xt_table_info *newinfo,
                   unsigned int valid_hooks, void *entry0)
 {
+       unsigned int calldepth, max_calldepth = 0;
        unsigned int hook;
 
        /* No recursion; use packet counter to save back ptrs (reset
@@ -470,6 +477,7 @@ mark_source_chains(const struct xt_table_info *newinfo,
 
                /* Set initial back pointer. */
                e->counters.pcnt = pos;
+               calldepth = 0;
 
                for (;;) {
                        const struct xt_standard_target *t
@@ -531,6 +539,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
                                        (entry0 + pos + size);
                                e->counters.pcnt = pos;
                                pos += size;
+                               if (calldepth > 0)
+                                       --calldepth;
                        } else {
                                int newpos = t->verdict;
 
@@ -544,6 +554,11 @@ mark_source_chains(const struct xt_table_info *newinfo,
                                                                newpos);
                                                return 0;
                                        }
+                                       if (entry0 + newpos != ip6t_next_entry(e) &&
+                                           !(e->ipv6.flags & IP6T_F_GOTO) &&
+                                           ++calldepth > max_calldepth)
+                                               max_calldepth = calldepth;
+
                                        /* This a jump; chase it. */
                                        duprintf("Jump rule %u -> %u\n",
                                                 pos, newpos);
@@ -560,6 +575,7 @@ mark_source_chains(const struct xt_table_info *newinfo,
                next:
                duprintf("Finished chain %u\n", hook);
        }
+       newinfo->stacksize = max_calldepth;
        return 1;
 }
 
@@ -839,9 +855,6 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
                if (ret != 0)
                        return ret;
                ++i;
-               if (strcmp(ip6t_get_target(iter)->u.user.name,
-                   XT_ERROR_TARGET) == 0)
-                       ++newinfo->stacksize;
        }
 
        if (i != repl->num_entries) {
@@ -1754,9 +1767,6 @@ translate_compat_table(struct net *net,
                if (ret != 0)
                        break;
                ++i;
-               if (strcmp(ip6t_get_target(iter1)->u.user.name,
-                   XT_ERROR_TARGET) == 0)
-                       ++newinfo->stacksize;
        }
        if (ret) {
                /*
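Two related changes meet in this file: stacksize is now the maximum jump depth computed by mark_source_chains() rather than a count of ERROR targets, and the per-cpu stack pointer becomes a plain local stackidx. The TEE comment above carries the remaining subtlety; a sketch of how the duplicated skb is steered onto its own stack, assuming the x_tables allocator sized jumpstack for two stacks when TEE can be active:

	struct ip6t_entry **stack = (struct ip6t_entry **)private->jumpstack[cpu];

	if (static_key_false(&xt_tee_enabled))
		/* nf_skb_duplicated is 0 or 1, so this picks a stack half */
		stack += private->stacksize * __this_cpu_read(nf_skb_duplicated);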
index 12331efd49cf865b2e3ce934a734af47f77928a1..567367a7517201dcc930d5be4cea51f23e22d16e 100644 (file)
@@ -35,14 +35,12 @@ MODULE_AUTHOR("Yasuyuki KOZAKAI <yasuyuki.kozakai@toshiba.co.jp>");
 MODULE_DESCRIPTION("Xtables: packet \"rejection\" target for IPv6");
 MODULE_LICENSE("GPL");
 
-
 static unsigned int
 reject_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
        const struct ip6t_reject_info *reject = par->targinfo;
        struct net *net = dev_net((par->in != NULL) ? par->in : par->out);
 
-       pr_debug("%s: medium point\n", __func__);
        switch (reject->with) {
        case IP6T_ICMP6_NO_ROUTE:
                nf_send_unreach6(net, skb, ICMPV6_NOROUTE, par->hooknum);
@@ -65,9 +63,6 @@ reject_tg6(struct sk_buff *skb, const struct xt_action_param *par)
        case IP6T_TCP_RESET:
                nf_send_reset6(net, skb, par->hooknum);
                break;
-       default:
-               net_info_ratelimited("case %u not handled yet\n", reject->with);
-               break;
        }
 
        return NF_DROP;
index 6f187c8d8a1bdf4ab27ec71ea05e865fcf782371..6d02498172c168f2716e3b71a29e3b082a93dcfb 100644 (file)
@@ -348,7 +348,7 @@ found:
        fq->ecn |= ecn;
        if (payload_len > fq->q.max_size)
                fq->q.max_size = payload_len;
-       add_frag_mem_limit(&fq->q, skb->truesize);
+       add_frag_mem_limit(fq->q.net, skb->truesize);
 
        /* The first fragment.
         * nhoffset is obtained from the first fragment, of course.
@@ -430,7 +430,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
                clone->ip_summed = head->ip_summed;
 
                NFCT_FRAG6_CB(clone)->orig = NULL;
-               add_frag_mem_limit(&fq->q, clone->truesize);
+               add_frag_mem_limit(fq->q.net, clone->truesize);
        }
 
        /* We have to remove fragment header from datagram and to relocate
@@ -454,7 +454,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
                        head->csum = csum_add(head->csum, fp->csum);
                head->truesize += fp->truesize;
        }
-       sub_frag_mem_limit(&fq->q, head->truesize);
+       sub_frag_mem_limit(fq->q.net, head->truesize);
 
        head->ignore_df = 1;
        head->next = NULL;
index a45db0b4785c1e89f523ce28cb8e4231fbbc85b9..267fb8d5876e169f27e0e9a595dc89a20cfbea4e 100644 (file)
@@ -39,12 +39,9 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
        if (skb->nfct)
                zone = nf_ct_zone((struct nf_conn *)skb->nfct);
 #endif
-
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-       if (skb->nf_bridge &&
-           skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
+       if (nf_bridge_in_prerouting(skb))
                return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
-#endif
+
        if (hooknum == NF_INET_PRE_ROUTING)
                return IP6_DEFRAG_CONNTRACK_IN + zone;
        else
index ca4700cb26c4feec258c8e5034389522125b113e..fdbada1569a37348b47b60769f7d679741b21d0a 100644 (file)
@@ -295,7 +295,8 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                 * unspecified and mapped address have a v4 equivalent.
                 */
                v4addr = LOOPBACK4_IPV6;
-               if (!(addr_type & IPV6_ADDR_MULTICAST)) {
+               if (!(addr_type & IPV6_ADDR_MULTICAST) &&
+                   !sock_net(sk)->ipv6.sysctl.ip_nonlocal_bind) {
                        err = -EADDRNOTAVAIL;
                        if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
                                           dev, 0)) {
index 8ffa2c8cce774e8398a031ab90c69a3ed2934a6a..f1159bb76e0a54fb55a3e8e382a6cb80c7a929f9 100644 (file)
@@ -144,7 +144,7 @@ void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
 
        IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
 
-       if (fq->q.flags & INET_FRAG_EVICTED)
+       if (inet_frag_evicting(&fq->q))
                goto out_rcu_unlock;
 
        IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
@@ -330,7 +330,7 @@ found:
        fq->q.stamp = skb->tstamp;
        fq->q.meat += skb->len;
        fq->ecn |= ecn;
-       add_frag_mem_limit(&fq->q, skb->truesize);
+       add_frag_mem_limit(fq->q.net, skb->truesize);
 
        /* The first fragment.
         * nhoffset is obtained from the first fragment, of course.
@@ -443,7 +443,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                head->len -= clone->len;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
-               add_frag_mem_limit(&fq->q, clone->truesize);
+               add_frag_mem_limit(fq->q.net, clone->truesize);
        }
 
        /* We have to remove fragment header from datagram and to relocate
@@ -481,7 +481,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                }
                fp = next;
        }
-       sub_frag_mem_limit(&fq->q, sum_truesize);
+       sub_frag_mem_limit(fq->q.net, sum_truesize);
 
        head->next = NULL;
        head->dev = dev;
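Both reassembly paths in this merge (the conntrack one above and plain IPv6 here) get the same mechanical conversion: the fragment memory-accounting helpers now take the per-namespace netns_frags directly instead of reaching through an inet_frag_queue. A sketch of the charge/release pairing, names as in the diff:

	/* charge when a fragment is queued ... */
	add_frag_mem_limit(fq->q.net, skb->truesize);	/* was: &fq->q */

	/* ... and release the same amounts on reassembly or eviction */
	sub_frag_mem_limit(fq->q.net, sum_truesize);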
index 6090969937f8b6809f74c3d03f29a0703089eff1..54fccf0d705ddee83e3ba1e1b655fc58cd5c38a4 100644 (file)
@@ -58,6 +58,7 @@
 #include <net/netevent.h>
 #include <net/netlink.h>
 #include <net/nexthop.h>
+#include <net/lwtunnel.h>
 
 #include <asm/uaccess.h>
 
@@ -544,6 +545,7 @@ static void rt6_probe_deferred(struct work_struct *w)
 
 static void rt6_probe(struct rt6_info *rt)
 {
+       struct __rt6_probe_work *work;
        struct neighbour *neigh;
        /*
         * Okay, this does not seem to be appropriate
@@ -558,34 +560,33 @@ static void rt6_probe(struct rt6_info *rt)
        rcu_read_lock_bh();
        neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
        if (neigh) {
-               write_lock(&neigh->lock);
                if (neigh->nud_state & NUD_VALID)
                        goto out;
-       }
-
-       if (!neigh ||
-           time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
-               struct __rt6_probe_work *work;
 
+               work = NULL;
+               write_lock(&neigh->lock);
+               if (!(neigh->nud_state & NUD_VALID) &&
+                   time_after(jiffies,
+                              neigh->updated +
+                              rt->rt6i_idev->cnf.rtr_probe_interval)) {
+                       work = kmalloc(sizeof(*work), GFP_ATOMIC);
+                       if (work)
+                               __neigh_set_probe_once(neigh);
+               }
+               write_unlock(&neigh->lock);
+       } else {
                work = kmalloc(sizeof(*work), GFP_ATOMIC);
+       }
 
-               if (neigh && work)
-                       __neigh_set_probe_once(neigh);
-
-               if (neigh)
-                       write_unlock(&neigh->lock);
+       if (work) {
+               INIT_WORK(&work->work, rt6_probe_deferred);
+               work->target = rt->rt6i_gateway;
+               dev_hold(rt->dst.dev);
+               work->dev = rt->dst.dev;
+               schedule_work(&work->work);
+       }
 
-               if (work) {
-                       INIT_WORK(&work->work, rt6_probe_deferred);
-                       work->target = rt->rt6i_gateway;
-                       dev_hold(rt->dst.dev);
-                       work->dev = rt->dst.dev;
-                       schedule_work(&work->work);
-               }
-       } else {
 out:
-               write_unlock(&neigh->lock);
-       }
        rcu_read_unlock_bh();
 }
 #else
@@ -1770,6 +1771,18 @@ int ip6_route_add(struct fib6_config *cfg)
 
        rt->dst.output = ip6_output;
 
+       if (cfg->fc_encap) {
+               struct lwtunnel_state *lwtstate;
+
+               err = lwtunnel_build_state(dev, cfg->fc_encap_type,
+                                          cfg->fc_encap, &lwtstate);
+               if (err)
+                       goto out;
+               rt->rt6i_lwtstate = lwtstate_get(lwtstate);
+               if (lwtunnel_output_redirect(rt->rt6i_lwtstate))
+                       rt->dst.output = lwtunnel_output6;
+       }
+
        ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
        rt->rt6i_dst.plen = cfg->fc_dst_len;
        if (rt->rt6i_dst.plen == 128)
@@ -2147,6 +2160,7 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
 #endif
        rt->rt6i_prefsrc = ort->rt6i_prefsrc;
        rt->rt6i_table = ort->rt6i_table;
+       rt->rt6i_lwtstate = lwtstate_get(ort->rt6i_lwtstate);
 }
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
@@ -2595,6 +2609,8 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
        [RTA_METRICS]           = { .type = NLA_NESTED },
        [RTA_MULTIPATH]         = { .len = sizeof(struct rtnexthop) },
        [RTA_PREF]              = { .type = NLA_U8 },
+       [RTA_ENCAP_TYPE]        = { .type = NLA_U16 },
+       [RTA_ENCAP]             = { .type = NLA_NESTED },
 };
 
 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -2689,6 +2705,12 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
                cfg->fc_flags |= RTF_PREF(pref);
        }
 
+       if (tb[RTA_ENCAP])
+               cfg->fc_encap = tb[RTA_ENCAP];
+
+       if (tb[RTA_ENCAP_TYPE])
+               cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
+
        err = 0;
 errout:
        return err;
@@ -2721,6 +2743,10 @@ beginning:
                                r_cfg.fc_gateway = nla_get_in6_addr(nla);
                                r_cfg.fc_flags |= RTF_GATEWAY;
                        }
+                       r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
+                       nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
+                       if (nla)
+                               r_cfg.fc_encap_type = nla_get_u16(nla);
                }
                err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
                if (err) {
@@ -2783,7 +2809,7 @@ static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
                return ip6_route_add(&cfg);
 }
 
-static inline size_t rt6_nlmsg_size(void)
+static inline size_t rt6_nlmsg_size(struct rt6_info *rt)
 {
        return NLMSG_ALIGN(sizeof(struct rtmsg))
               + nla_total_size(16) /* RTA_SRC */
@@ -2797,7 +2823,8 @@ static inline size_t rt6_nlmsg_size(void)
               + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
               + nla_total_size(sizeof(struct rta_cacheinfo))
               + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
-              + nla_total_size(1); /* RTA_PREF */
+              + nla_total_size(1) /* RTA_PREF */
+              + lwtunnel_get_encap_size(rt->rt6i_lwtstate);
 }
 
 static int rt6_fill_node(struct net *net,
@@ -2945,6 +2972,8 @@ static int rt6_fill_node(struct net *net,
        if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
                goto nla_put_failure;
 
+       lwtunnel_fill_encap(skb, rt->rt6i_lwtstate);
+
        nlmsg_end(skb, nlh);
        return 0;
 
@@ -3071,7 +3100,7 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
        err = -ENOBUFS;
        seq = info->nlh ? info->nlh->nlmsg_seq : 0;
 
-       skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
+       skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
        if (!skb)
                goto errout;
 
index 4e705add4f187c69b1b11202d47808308e87cf80..45243bbe52536d523a7aff0b636a72b8f2e3daf5 100644 (file)
@@ -17,6 +17,9 @@
 #include <net/inet_frag.h>
 
 static int one = 1;
+static int auto_flowlabels_min;
+static int auto_flowlabels_max = IP6_AUTO_FLOW_LABEL_MAX;
+
 
 static struct ctl_table ipv6_table_template[] = {
        {
@@ -45,7 +48,9 @@ static struct ctl_table ipv6_table_template[] = {
                .data           = &init_net.ipv6.sysctl.auto_flowlabels,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &auto_flowlabels_min,
+               .extra2         = &auto_flowlabels_max
        },
        {
                .procname       = "fwmark_reflect",
@@ -75,6 +80,13 @@ static struct ctl_table ipv6_table_template[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "ip_nonlocal_bind",
+               .data           = &init_net.ipv6.sysctl.ip_nonlocal_bind,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
        { }
 };
 
@@ -117,6 +129,7 @@ static int __net_init ipv6_sysctl_net_init(struct net *net)
        ipv6_table[5].data = &net->ipv6.sysctl.idgen_retries;
        ipv6_table[6].data = &net->ipv6.sysctl.idgen_delay;
        ipv6_table[7].data = &net->ipv6.sysctl.flowlabel_state_ranges;
+       ipv6_table[8].data = &net->ipv6.sysctl.ip_nonlocal_bind;
 
        ipv6_route_table = ipv6_route_sysctl_init(net);
        if (!ipv6_route_table)
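Two details are easy to miss in this sysctl hunk: auto_flowlabels moves to proc_dointvec_minmax so writes are clamped to [0, IP6_AUTO_FLOW_LABEL_MAX], and every entry appended to ipv6_table_template must also have its .data pointer rebound per namespace, which is what the new ipv6_table[8] line does. A sketch of that rebinding pattern, index as in the diff:

	/* the template's .data points at init_net; without this rebind in
	 * ipv6_sysctl_net_init(), all namespaces would share one value */
	ipv6_table[8].data = &net->ipv6.sysctl.ip_nonlocal_bind;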
index 6748c4277affad71cd721e3a985af10c31c047ad..52dd0d9974d6c8dbaa4961434211eda2f55b6482 100644 (file)
@@ -276,7 +276,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        if (err)
                goto late_failure;
 
-       ip6_set_txhash(sk);
+       sk_set_txhash(sk);
 
        if (!tp->write_seq && likely(!tp->repair))
                tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
@@ -1090,7 +1090,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
        newsk->sk_bound_dev_if = ireq->ir_iif;
 
-       ip6_set_txhash(newsk);
+       sk_set_txhash(newsk);
 
        /* Now IPv6 options...
 
@@ -1481,8 +1481,7 @@ do_time_wait:
                                            ntohs(th->dest), tcp_v6_iif(skb));
                if (sk2) {
                        struct inet_timewait_sock *tw = inet_twsk(sk);
-                       inet_twsk_deschedule(tw);
-                       inet_twsk_put(tw);
+                       inet_twsk_deschedule_put(tw);
                        sk = sk2;
                        tcp_v6_restore_cb(skb);
                        goto process;
index 8fd9febaa5bad8150bdf632b97221a140e0e3cbd..8dab4e569571dd34096104f87e8ed27faa7d5522 100644 (file)
@@ -613,7 +613,7 @@ static int llc_wait_data(struct sock *sk, long timeo)
                if (signal_pending(current))
                        break;
                rc = 0;
-               if (sk_wait_data(sk, &timeo))
+               if (sk_wait_data(sk, &timeo, NULL))
                        break;
        }
        return rc;
@@ -802,7 +802,7 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                        release_sock(sk);
                        lock_sock(sk);
                } else
-                       sk_wait_data(sk, &timeo);
+                       sk_wait_data(sk, &timeo, NULL);
 
                if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) {
                        net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n",
index 317c4662e544679ab37dcc8cfa92fc8108c4820b..f7ba51e8b4cafbf720c5ee3096c1102cbf2a4438 100644 (file)
@@ -44,6 +44,49 @@ static void ieee802154_del_iface_deprecated(struct wpan_phy *wpan_phy,
        ieee802154_if_remove(sdata);
 }
 
+#ifdef CONFIG_PM
+static int ieee802154_suspend(struct wpan_phy *wpan_phy)
+{
+       struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+
+       if (!local->open_count)
+               goto suspend;
+
+       ieee802154_stop_queue(&local->hw);
+       synchronize_net();
+
+       /* stop hardware - this must stop RX */
+       ieee802154_stop_device(local);
+
+suspend:
+       local->suspended = true;
+       return 0;
+}
+
+static int ieee802154_resume(struct wpan_phy *wpan_phy)
+{
+       struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+       int ret;
+
+       /* nothing to do if HW shouldn't run */
+       if (!local->open_count)
+               goto wake_up;
+
+       /* restart hardware */
+       ret = drv_start(local);
+       if (ret)
+               return ret;
+
+wake_up:
+       ieee802154_wake_queue(&local->hw);
+       local->suspended = false;
+       return 0;
+}
+#else
+#define ieee802154_suspend NULL
+#define ieee802154_resume NULL
+#endif
+
 static int
 ieee802154_add_iface(struct wpan_phy *phy, const char *name,
                     unsigned char name_assign_type,
@@ -145,13 +188,18 @@ static int
 ieee802154_set_pan_id(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
                      __le16 pan_id)
 {
+       int ret;
+
        ASSERT_RTNL();
 
        if (wpan_dev->pan_id == pan_id)
                return 0;
 
-       wpan_dev->pan_id = pan_id;
-       return 0;
+       ret = mac802154_wpan_update_llsec(wpan_dev->netdev);
+       if (!ret)
+               wpan_dev->pan_id = pan_id;
+
+       return ret;
 }
 
 static int
@@ -227,6 +275,8 @@ ieee802154_set_lbt_mode(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
 const struct cfg802154_ops mac802154_config_ops = {
        .add_virtual_intf_deprecated = ieee802154_add_iface_deprecated,
        .del_virtual_intf_deprecated = ieee802154_del_iface_deprecated,
+       .suspend = ieee802154_suspend,
+       .resume = ieee802154_resume,
        .add_virtual_intf = ieee802154_add_iface,
        .del_virtual_intf = ieee802154_del_iface,
        .set_channel = ieee802154_set_channel,
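The new suspend/resume ops follow a simple contract, and the rx hunk later in this section adds the matching guard. A sketch of the ordering the suspend path relies on, annotating the calls shown above:

	/* suspend order matters:
	 * 1. ieee802154_stop_queue()  - no new tx enters from the stack
	 * 2. synchronize_net()        - in-flight rx settles
	 * 3. ieee802154_stop_device() - hardware stops, rx ceases
	 * 4. local->suspended = true  - late stragglers are dropped
	 */

	/* and the receive-side check this enables (see the rx.c hunk below): */
	if (local->suspended)
		goto drop;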
index 34755d5751a4681c65d09cb7487e5b98350cedfe..56ccffa3f2bfc7731adfaabb1026ef7e8af68d32 100644 (file)
@@ -56,9 +56,13 @@ struct ieee802154_local {
        struct hrtimer ifs_timer;
 
        bool started;
+       bool suspended;
 
        struct tasklet_struct tasklet;
        struct sk_buff_head skb_queue;
+
+       struct sk_buff *tx_skb;
+       struct work_struct tx_work;
 };
 
 enum {
@@ -94,8 +98,6 @@ struct ieee802154_sub_if_data {
        struct mac802154_llsec sec;
 };
 
-#define MAC802154_CHAN_NONE            0xff /* No channel is assigned */
-
 /* utility functions/constants */
 extern const void *const mac802154_wpan_phy_privid; /*  for wpan_phy privid */
 
@@ -125,6 +127,8 @@ ieee802154_sdata_running(struct ieee802154_sub_if_data *sdata)
 
 extern struct ieee802154_mlme_ops mac802154_mlme_wpan;
 
+void ieee802154_rx(struct ieee802154_local *local, struct sk_buff *skb);
+void ieee802154_xmit_worker(struct work_struct *work);
 netdev_tx_t
 ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev);
 netdev_tx_t
@@ -167,6 +171,8 @@ void mac802154_get_table(struct net_device *dev,
                         struct ieee802154_llsec_table **t);
 void mac802154_unlock_table(struct net_device *dev);
 
+int mac802154_wpan_update_llsec(struct net_device *dev);
+
 /* interface handling */
 int ieee802154_iface_init(void);
 void ieee802154_iface_exit(void);
@@ -176,5 +182,6 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name,
                  unsigned char name_assign_type, enum nl802154_iftype type,
                  __le64 extended_addr);
 void ieee802154_remove_interfaces(struct ieee802154_local *local);
+void ieee802154_stop_device(struct ieee802154_local *local);
 
 #endif /* __IEEE802154_I_H */
index 8b698246a51b6d304c209442ca0ecd0c3e652c79..416de903e46757cfead3fe54106efa07ce6e6245 100644 (file)
@@ -30,7 +30,7 @@
 #include "ieee802154_i.h"
 #include "driver-ops.h"
 
-static int mac802154_wpan_update_llsec(struct net_device *dev)
+int mac802154_wpan_update_llsec(struct net_device *dev)
 {
        struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
        struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
@@ -314,11 +314,8 @@ static int mac802154_slave_close(struct net_device *dev)
 
        clear_bit(SDATA_STATE_RUNNING, &sdata->state);
 
-       if (!local->open_count) {
-               flush_workqueue(local->workqueue);
-               hrtimer_cancel(&local->ifs_timer);
-               drv_stop(local);
-       }
+       if (!local->open_count)
+               ieee802154_stop_device(local);
 
        return 0;
 }
@@ -471,6 +468,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
                       enum nl802154_iftype type)
 {
        struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+       int ret;
        u8 tmp;
 
        /* set some type-dependent values */
@@ -505,6 +503,10 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
                mutex_init(&sdata->sec_mtx);
 
                mac802154_llsec_init(&sdata->sec);
+               ret = mac802154_wpan_update_llsec(sdata->dev);
+               if (ret < 0)
+                       return ret;
+
                break;
        case NL802154_IFTYPE_MONITOR:
                sdata->dev->destructor = free_netdev;
index 356b346e1ee86fdeadebf7be5d318c70dbc0d969..9e55431b9a5cc0baf0c40fa9e7a96c3381617ad5 100644 (file)
@@ -40,7 +40,7 @@ static void ieee802154_tasklet_handler(unsigned long data)
                         * netstack.
                         */
                        skb->pkt_type = 0;
-                       ieee802154_rx(&local->hw, skb);
+                       ieee802154_rx(local, skb);
                        break;
                default:
                        WARN(1, "mac802154: Packet is of unknown type %d\n",
@@ -58,11 +58,9 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
        struct ieee802154_local *local;
        size_t priv_size;
 
-       if (!ops || !(ops->xmit_async || ops->xmit_sync) || !ops->ed ||
-           !ops->start || !ops->stop || !ops->set_channel) {
-               pr_err("undefined IEEE802.15.4 device operations\n");
+       if (WARN_ON(!ops || !(ops->xmit_async || ops->xmit_sync) || !ops->ed ||
+                   !ops->start || !ops->stop || !ops->set_channel))
                return NULL;
-       }
 
        /* Ensure 32-byte alignment of our private data and hw private data.
         * We use the wpan_phy priv data for both our ieee802154_local and for
@@ -107,6 +105,8 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
 
        skb_queue_head_init(&local->skb_queue);
 
+       INIT_WORK(&local->tx_work, ieee802154_xmit_worker);
+
        /* init supported flags with 802.15.4 default ranges */
        phy->supported.max_minbe = 8;
        phy->supported.min_maxbe = 3;
index d93ad2d4a4fc2a8cf103d0a87337e2eb115fddd0..d1c33c1d6b9b3dcd74dc077b5de5b5556a18901e 100644 (file)
@@ -246,13 +246,15 @@ ieee802154_monitors_rx(struct ieee802154_local *local, struct sk_buff *skb)
        }
 }
 
-void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb)
+void ieee802154_rx(struct ieee802154_local *local, struct sk_buff *skb)
 {
-       struct ieee802154_local *local = hw_to_local(hw);
        u16 crc;
 
        WARN_ON_ONCE(softirq_count() == 0);
 
+       if (local->suspended)
+               goto drop;
+
        /* TODO: When a transceiver omits the checksum here, we
         * add an own calculated one. This is currently an ugly
         * solution because the monitor needs a crc here.
@@ -273,8 +275,7 @@ void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb)
                crc = crc_ccitt(0, skb->data, skb->len);
                if (crc) {
                        rcu_read_unlock();
-                       kfree_skb(skb);
-                       return;
+                       goto drop;
                }
        }
        /* remove crc */
@@ -283,8 +284,11 @@ void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb)
        __ieee802154_rx_handle_packet(local, skb);
 
        rcu_read_unlock();
+
+       return;
+drop:
+       kfree_skb(skb);
 }
-EXPORT_SYMBOL(ieee802154_rx);
 
 void
 ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi)
index c62e95695c7843947c8643cb268f95f2e64c3da9..7ed439172f30809d59fb5673131957ca8c25c56c 100644 (file)
 #include "ieee802154_i.h"
 #include "driver-ops.h"
 
-/* IEEE 802.15.4 transceivers can sleep during the xmit session, so process
- * packets through the workqueue.
- */
-struct ieee802154_xmit_cb {
-       struct sk_buff *skb;
-       struct work_struct work;
-       struct ieee802154_local *local;
-};
-
-static struct ieee802154_xmit_cb ieee802154_xmit_cb;
-
-static void ieee802154_xmit_worker(struct work_struct *work)
+void ieee802154_xmit_worker(struct work_struct *work)
 {
-       struct ieee802154_xmit_cb *cb =
-               container_of(work, struct ieee802154_xmit_cb, work);
-       struct ieee802154_local *local = cb->local;
-       struct sk_buff *skb = cb->skb;
+       struct ieee802154_local *local =
+               container_of(work, struct ieee802154_local, tx_work);
+       struct sk_buff *skb = local->tx_skb;
        struct net_device *dev = skb->dev;
        int res;
 
@@ -106,11 +94,8 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
                dev->stats.tx_packets++;
                dev->stats.tx_bytes += skb->len;
        } else {
-               INIT_WORK(&ieee802154_xmit_cb.work, ieee802154_xmit_worker);
-               ieee802154_xmit_cb.skb = skb;
-               ieee802154_xmit_cb.local = local;
-
-               queue_work(local->workqueue, &ieee802154_xmit_cb.work);
+               local->tx_skb = skb;
+               queue_work(local->workqueue, &local->tx_work);
        }
 
        return NETDEV_TX_OK;
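The tx rework replaces a single static ieee802154_xmit_cb, shared across every device in the system, with per-device state in ieee802154_local. That is safe because at most one frame is in flight per device: the queue is stopped until ieee802154_xmit_complete() runs. A sketch of the resulting handoff, assuming that single-frame invariant:

	/* stash the frame and kick the per-device worker; no locking is
	 * needed because the netif queue stays stopped until completion */
	local->tx_skb = skb;
	queue_work(local->workqueue, &local->tx_work);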
index 583435f3893037e45d4a5879b66b4cda3bb6f27f..f9fd0957ab67f256d10e80563bb68ce88334f911 100644 (file)
@@ -14,6 +14,7 @@
  */
 
 #include "ieee802154_i.h"
+#include "driver-ops.h"
 
 /* privid for wpan_phys to determine whether they belong to us or not */
 const void *const mac802154_wpan_phy_privid = &mac802154_wpan_phy_privid;
@@ -92,3 +93,10 @@ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
        dev_consume_skb_any(skb);
 }
 EXPORT_SYMBOL(ieee802154_xmit_complete);
+
+void ieee802154_stop_device(struct ieee802154_local *local)
+{
+       flush_workqueue(local->workqueue);
+       hrtimer_cancel(&local->ifs_timer);
+       drv_stop(local);
+}
index 17bde799c8548e46a791fabc8b9619c57f2b2f99..5c467ef973114c49b352ac64ed248e4199d3d0f3 100644 (file)
@@ -24,7 +24,13 @@ config NET_MPLS_GSO
 
 config MPLS_ROUTING
        tristate "MPLS: routing support"
-       help
+       ---help---
         Add support for forwarding of mpls packets.
 
+config MPLS_IPTUNNEL
+       tristate "MPLS: IP over MPLS tunnel support"
+       depends on LWTUNNEL && MPLS_ROUTING
+       ---help---
+        MPLS IP tunnel support.
+
 endif # MPLS
index 65bbe68c72e66ad44463ed465eaafe01e4c2bd10..9ca92362501653463429b0254d999782f2b6c450 100644 (file)
@@ -3,5 +3,6 @@
 #
 obj-$(CONFIG_NET_MPLS_GSO) += mpls_gso.o
 obj-$(CONFIG_MPLS_ROUTING) += mpls_router.o
+obj-$(CONFIG_MPLS_IPTUNNEL) += mpls_iptunnel.o
 
 mpls_router-y := af_mpls.o
index 1f93a5978f2ad43fc81a16427e34d07ca2c0f34e..b6b9a6c4e7849a35fa2161b7eafd41315b30208b 100644 (file)
 #include <net/ip_fib.h>
 #include <net/netevent.h>
 #include <net/netns/generic.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#endif
 #include "internal.h"
 
 #define LABEL_NOT_SPECIFIED (1<<20)
@@ -58,10 +62,11 @@ static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev)
        return rcu_dereference_rtnl(dev->mpls_ptr);
 }
 
-static bool mpls_output_possible(const struct net_device *dev)
+bool mpls_output_possible(const struct net_device *dev)
 {
        return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
 }
+EXPORT_SYMBOL_GPL(mpls_output_possible);
 
 static unsigned int mpls_rt_header_size(const struct mpls_route *rt)
 {
@@ -69,13 +74,14 @@ static unsigned int mpls_rt_header_size(const struct mpls_route *rt)
        return rt->rt_labels * sizeof(struct mpls_shim_hdr);
 }
 
-static unsigned int mpls_dev_mtu(const struct net_device *dev)
+unsigned int mpls_dev_mtu(const struct net_device *dev)
 {
        /* The amount of data the layer 2 frame can hold */
        return dev->mtu;
 }
+EXPORT_SYMBOL_GPL(mpls_dev_mtu);
 
-static bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
+bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 {
        if (skb->len <= mtu)
                return false;
@@ -85,6 +91,7 @@ static bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 
        return true;
 }
+EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
 
 static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
                        struct mpls_entry_decoded dec)
@@ -286,7 +293,7 @@ static void mpls_notify_route(struct net *net, unsigned index,
        struct mpls_route *rt = new ? new : old;
        unsigned nlm_flags = (old && new) ? NLM_F_REPLACE : 0;
        /* Ignore reserved labels for now */
-       if (rt && (index >= 16))
+       if (rt && (index >= MPLS_LABEL_FIRST_UNRESERVED))
                rtmsg_lfib(event, index, rt, nlh, net, portid, nlm_flags);
 }
 
@@ -320,13 +327,98 @@ static unsigned find_free_label(struct net *net)
 
        platform_label = rtnl_dereference(net->mpls.platform_label);
        platform_labels = net->mpls.platform_labels;
-       for (index = 16; index < platform_labels; index++) {
+       for (index = MPLS_LABEL_FIRST_UNRESERVED; index < platform_labels;
+            index++) {
                if (!rtnl_dereference(platform_label[index]))
                        return index;
        }
        return LABEL_NOT_SPECIFIED;
 }
 
+#if IS_ENABLED(CONFIG_INET)
+static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
+{
+       struct net_device *dev = NULL;
+       struct rtable *rt;
+       struct in_addr daddr;
+
+       memcpy(&daddr, addr, sizeof(struct in_addr));
+       rt = ip_route_output(net, daddr.s_addr, 0, 0, 0);
+       if (IS_ERR(rt))
+               goto errout;
+
+       dev = rt->dst.dev;
+       dev_hold(dev);
+
+       ip_rt_put(rt);
+
+       return dev;
+errout:
+       return ERR_PTR(-ENODEV);
+}
+#else
+static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
+{
+       return ERR_PTR(-EAFNOSUPPORT);
+}
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
+{
+       struct net_device *dev = NULL;
+       struct dst_entry *dst;
+       struct flowi6 fl6;
+       int err;
+
+       if (!ipv6_stub)
+               return ERR_PTR(-EAFNOSUPPORT);
+
+       memset(&fl6, 0, sizeof(fl6));
+       memcpy(&fl6.daddr, addr, sizeof(struct in6_addr));
+       err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6);
+       if (err)
+               goto errout;
+
+       dev = dst->dev;
+       dev_hold(dev);
+       dst_release(dst);
+
+       return dev;
+
+errout:
+       return ERR_PTR(err);
+}
+#else
+static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
+{
+       return ERR_PTR(-EAFNOSUPPORT);
+}
+#endif
+
+static struct net_device *find_outdev(struct net *net,
+                                     struct mpls_route_config *cfg)
+{
+       struct net_device *dev = NULL;
+
+       if (!cfg->rc_ifindex) {
+               switch (cfg->rc_via_table) {
+               case NEIGH_ARP_TABLE:
+                       dev = inet_fib_lookup_dev(net, cfg->rc_via);
+                       break;
+               case NEIGH_ND_TABLE:
+                       dev = inet6_fib_lookup_dev(net, cfg->rc_via);
+                       break;
+               case NEIGH_LINK_TABLE:
+                       break;
+               }
+       } else {
+               dev = dev_get_by_index(net, cfg->rc_ifindex);
+       }
+
+       return dev;
+}
+
 static int mpls_route_add(struct mpls_route_config *cfg)
 {
        struct mpls_route __rcu **platform_label;
@@ -345,8 +437,8 @@ static int mpls_route_add(struct mpls_route_config *cfg)
                index = find_free_label(net);
        }
 
-       /* The first 16 labels are reserved, and may not be set */
-       if (index < 16)
+       /* Reserved labels may not be set */
+       if (index < MPLS_LABEL_FIRST_UNRESERVED)
                goto errout;
 
        /* The full 20 bit range may not be supported. */
@@ -357,10 +449,12 @@ static int mpls_route_add(struct mpls_route_config *cfg)
        if (cfg->rc_output_labels > MAX_NEW_LABELS)
                goto errout;
 
-       err = -ENODEV;
-       dev = dev_get_by_index(net, cfg->rc_ifindex);
-       if (!dev)
+       dev = find_outdev(net, cfg);
+       if (IS_ERR(dev)) {
+               err = PTR_ERR(dev);
+               dev = NULL;
                goto errout;
+       }
 
        /* Ensure this is a supported device */
        err = -EINVAL;
@@ -423,8 +517,8 @@ static int mpls_route_del(struct mpls_route_config *cfg)
 
        index = cfg->rc_label;
 
-       /* The first 16 labels are reserved, and may not be removed */
-       if (index < 16)
+       /* Reserved labels may not be removed */
+       if (index < MPLS_LABEL_FIRST_UNRESERVED)
                goto errout;
 
        /* The full 20 bit range may not be supported */
@@ -626,6 +720,7 @@ int nla_put_labels(struct sk_buff *skb, int attrtype,
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(nla_put_labels);
 
 int nla_get_labels(const struct nlattr *nla,
                   u32 max_labels, u32 *labels, u32 label[])
@@ -671,6 +766,7 @@ int nla_get_labels(const struct nlattr *nla,
        *labels = nla_labels;
        return 0;
 }
+EXPORT_SYMBOL_GPL(nla_get_labels);
 
 static int rtm_to_route_config(struct sk_buff *skb,  struct nlmsghdr *nlh,
                               struct mpls_route_config *cfg)
@@ -740,8 +836,8 @@ static int rtm_to_route_config(struct sk_buff *skb,  struct nlmsghdr *nlh,
                                           &cfg->rc_label))
                                goto errout;
 
-                       /* The first 16 labels are reserved, and may not be set */
-                       if (cfg->rc_label < 16)
+                       /* Reserved labels may not be set */
+                       if (cfg->rc_label < MPLS_LABEL_FIRST_UNRESERVED)
                                goto errout;
 
                        break;
@@ -866,8 +962,8 @@ static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
        ASSERT_RTNL();
 
        index = cb->args[0];
-       if (index < 16)
-               index = 16;
+       if (index < MPLS_LABEL_FIRST_UNRESERVED)
+               index = MPLS_LABEL_FIRST_UNRESERVED;
 
        platform_label = rtnl_dereference(net->mpls.platform_label);
        platform_labels = net->mpls.platform_labels;
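find_outdev() lets an MPLS route omit rc_ifindex and derive the output device from the via address with a FIB lookup. The IPv6 branch is exactly what the ip6_dst_lookup() signature change at the top of this section enables: with no socket to derive the namespace from, it passes net explicitly and sk == NULL. A condensed sketch of that branch (error propagation trimmed):

	struct dst_entry *dst;
	struct flowi6 fl6 = {};

	memcpy(&fl6.daddr, addr, sizeof(struct in6_addr));
	if (ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6))
		return ERR_PTR(-ENODEV);
	dev = dst->dev;
	dev_hold(dev);		/* keep the device past the dst release */
	dst_release(dst);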
index 8cabeb5a1cb928c856c037c5994116df8547fb71..2681a4ba6c375f3faf83498150350ddea7392ccc 100644 (file)
@@ -50,7 +50,12 @@ static inline struct mpls_entry_decoded mpls_entry_decode(struct mpls_shim_hdr *
        return result;
 }
 
-int nla_put_labels(struct sk_buff *skb, int attrtype,  u8 labels, const u32 label[]);
-int nla_get_labels(const struct nlattr *nla, u32 max_labels, u32 *labels, u32 label[]);
+int nla_put_labels(struct sk_buff *skb, int attrtype,  u8 labels,
+                  const u32 label[]);
+int nla_get_labels(const struct nlattr *nla, u32 max_labels, u32 *labels,
+                  u32 label[]);
+bool mpls_output_possible(const struct net_device *dev);
+unsigned int mpls_dev_mtu(const struct net_device *dev);
+bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu);
 
 #endif /* MPLS_INTERNAL_H */
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
new file mode 100644 (file)
index 0000000..276f8c9
--- /dev/null
@@ -0,0 +1,233 @@
+/*
+ * mpls tunnels        An implementation of MPLS tunnels using the lightweight
+ *             tunnel infrastructure
+ *
+ * Authors:    Roopa Prabhu, <roopa@cumulusnetworks.com>
+ *
+ *             This program is free software; you can redistribute it and/or
+ *             modify it under the terms of the GNU General Public License
+ *             as published by the Free Software Foundation; either version
+ *             2 of the License, or (at your option) any later version.
+ *
+ */
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/net.h>
+#include <linux/module.h>
+#include <linux/mpls.h>
+#include <linux/vmalloc.h>
+#include <net/ip.h>
+#include <net/dst.h>
+#include <net/lwtunnel.h>
+#include <net/netevent.h>
+#include <net/netns/generic.h>
+#include <net/ip6_fib.h>
+#include <net/route.h>
+#include <net/mpls_iptunnel.h>
+#include <linux/mpls_iptunnel.h>
+#include "internal.h"
+
+static const struct nla_policy mpls_iptunnel_policy[MPLS_IPTUNNEL_MAX + 1] = {
+       [MPLS_IPTUNNEL_DST]     = { .type = NLA_U32 },
+};
+
+static unsigned int mpls_encap_size(struct mpls_iptunnel_encap *en)
+{
+       /* The size of the layer 2.5 labels to be added for this route */
+       return en->labels * sizeof(struct mpls_shim_hdr);
+}
+
+int mpls_output(struct sock *sk, struct sk_buff *skb)
+{
+       struct mpls_iptunnel_encap *tun_encap_info;
+       struct mpls_shim_hdr *hdr;
+       struct net_device *out_dev;
+       unsigned int hh_len;
+       unsigned int new_header_size;
+       unsigned int mtu;
+       struct dst_entry *dst = skb_dst(skb);
+       struct rtable *rt = NULL;
+       struct rt6_info *rt6 = NULL;
+       struct lwtunnel_state *lwtstate = NULL;
+       int err = 0;
+       bool bos;
+       int i;
+       unsigned int ttl;
+
+       /* Obtain the ttl */
+       if (skb->protocol == htons(ETH_P_IP)) {
+               ttl = ip_hdr(skb)->ttl;
+               rt = (struct rtable *)dst;
+               lwtstate = rt->rt_lwtstate;
+       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               ttl = ipv6_hdr(skb)->hop_limit;
+               rt6 = (struct rt6_info *)dst;
+               lwtstate = rt6->rt6i_lwtstate;
+       } else {
+               goto drop;
+       }
+
+       skb_orphan(skb);
+
+       /* Find the output device */
+       out_dev = dst->dev;
+       if (!mpls_output_possible(out_dev) ||
+           !lwtstate || skb_warn_if_lro(skb))
+               goto drop;
+
+       skb_forward_csum(skb);
+
+       tun_encap_info = mpls_lwtunnel_encap(lwtstate);
+
+       /* Verify the destination can hold the packet */
+       new_header_size = mpls_encap_size(tun_encap_info);
+       mtu = mpls_dev_mtu(out_dev);
+       if (mpls_pkt_too_big(skb, mtu - new_header_size))
+               goto drop;
+
+       hh_len = LL_RESERVED_SPACE(out_dev);
+       if (!out_dev->header_ops)
+               hh_len = 0;
+
+       /* Ensure there is enough space for the headers in the skb */
+       if (skb_cow(skb, hh_len + new_header_size))
+               goto drop;
+
+       skb_push(skb, new_header_size);
+       skb_reset_network_header(skb);
+
+       skb->dev = out_dev;
+       skb->protocol = htons(ETH_P_MPLS_UC);
+
+       /* Push the new labels */
+       hdr = mpls_hdr(skb);
+       bos = true;
+       for (i = tun_encap_info->labels - 1; i >= 0; i--) {
+               hdr[i] = mpls_entry_encode(tun_encap_info->label[i],
+                                          ttl, 0, bos);
+               bos = false;
+       }
+
+       if (rt)
+               err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gateway,
+                                skb);
+       else if (rt6)
+               err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt6->rt6i_gateway,
+                                skb);
+       if (err)
+               net_dbg_ratelimited("%s: packet transmission failed: %d\n",
+                                   __func__, err);
+
+       return 0;
+
+drop:
+       kfree_skb(skb);
+       return -EINVAL;
+}
+
+static int mpls_build_state(struct net_device *dev, struct nlattr *nla,
+                           struct lwtunnel_state **ts)
+{
+       struct mpls_iptunnel_encap *tun_encap_info;
+       struct nlattr *tb[MPLS_IPTUNNEL_MAX + 1];
+       struct lwtunnel_state *newts;
+       int tun_encap_info_len;
+       int ret;
+
+       ret = nla_parse_nested(tb, MPLS_IPTUNNEL_MAX, nla,
+                              mpls_iptunnel_policy);
+       if (ret < 0)
+               return ret;
+
+       if (!tb[MPLS_IPTUNNEL_DST])
+               return -EINVAL;
+
+       tun_encap_info_len = sizeof(*tun_encap_info);
+
+       newts = lwtunnel_state_alloc(tun_encap_info_len);
+       if (!newts)
+               return -ENOMEM;
+
+       newts->len = tun_encap_info_len;
+       tun_encap_info = mpls_lwtunnel_encap(newts);
+       ret = nla_get_labels(tb[MPLS_IPTUNNEL_DST], MAX_NEW_LABELS,
+                            &tun_encap_info->labels, tun_encap_info->label);
+       if (ret)
+               goto errout;
+       newts->type = LWTUNNEL_ENCAP_MPLS;
+       newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
+
+       *ts = newts;
+
+       return 0;
+
+errout:
+       kfree(newts);
+       *ts = NULL;
+
+       return ret;
+}
+
+static int mpls_fill_encap_info(struct sk_buff *skb,
+                               struct lwtunnel_state *lwtstate)
+{
+       struct mpls_iptunnel_encap *tun_encap_info;
+
+       tun_encap_info = mpls_lwtunnel_encap(lwtstate);
+
+       if (nla_put_labels(skb, MPLS_IPTUNNEL_DST, tun_encap_info->labels,
+                          tun_encap_info->label))
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static int mpls_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+       struct mpls_iptunnel_encap *tun_encap_info;
+
+       tun_encap_info = mpls_lwtunnel_encap(lwtstate);
+
+       return nla_total_size(tun_encap_info->labels * 4);
+}
+
+static int mpls_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+       struct mpls_iptunnel_encap *a_hdr = mpls_lwtunnel_encap(a);
+       struct mpls_iptunnel_encap *b_hdr = mpls_lwtunnel_encap(b);
+       int l;
+
+       if (a_hdr->labels != b_hdr->labels)
+               return 1;
+
+       for (l = 0; l < MAX_NEW_LABELS; l++)
+               if (a_hdr->label[l] != b_hdr->label[l])
+                       return 1;
+       return 0;
+}
+
+static const struct lwtunnel_encap_ops mpls_iptun_ops = {
+       .build_state = mpls_build_state,
+       .output = mpls_output,
+       .fill_encap = mpls_fill_encap_info,
+       .get_encap_size = mpls_encap_nlsize,
+       .cmp_encap = mpls_encap_cmp,
+};
+
+static int __init mpls_iptunnel_init(void)
+{
+       return lwtunnel_encap_add_ops(&mpls_iptun_ops, LWTUNNEL_ENCAP_MPLS);
+}
+module_init(mpls_iptunnel_init);
+
+static void __exit mpls_iptunnel_exit(void)
+{
+       lwtunnel_encap_del_ops(&mpls_iptun_ops, LWTUNNEL_ENCAP_MPLS);
+}
+module_exit(mpls_iptunnel_exit);
+
+MODULE_DESCRIPTION("MultiProtocol Label Switching IP Tunnels");
+MODULE_LICENSE("GPL v2");
index a0e54974e2c90fc581e99d5a9f7c24d99a515c16..2a5a0704245cc3cbddbc9b44debc016d8df2b84f 100644 (file)
@@ -34,6 +34,9 @@ EXPORT_SYMBOL(nf_afinfo);
 const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
 EXPORT_SYMBOL_GPL(nf_ipv6_ops);
 
+DEFINE_PER_CPU(bool, nf_skb_duplicated);
+EXPORT_SYMBOL_GPL(nf_skb_duplicated);
+
 int nf_register_afinfo(const struct nf_afinfo *afinfo)
 {
        mutex_lock(&afinfo_mutex);
@@ -52,9 +55,6 @@ void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
 }
 EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
 
-struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly;
-EXPORT_SYMBOL(nf_hooks);
-
 #ifdef HAVE_JUMP_LABEL
 struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 EXPORT_SYMBOL(nf_hooks_needed);
@@ -62,63 +62,166 @@ EXPORT_SYMBOL(nf_hooks_needed);
 
 static DEFINE_MUTEX(nf_hook_mutex);
 
-int nf_register_hook(struct nf_hook_ops *reg)
+static struct list_head *nf_find_hook_list(struct net *net,
+                                          const struct nf_hook_ops *reg)
 {
-       struct list_head *nf_hook_list;
-       struct nf_hook_ops *elem;
+       struct list_head *hook_list = NULL;
 
-       mutex_lock(&nf_hook_mutex);
-       switch (reg->pf) {
-       case NFPROTO_NETDEV:
+       if (reg->pf != NFPROTO_NETDEV)
+               hook_list = &net->nf.hooks[reg->pf][reg->hooknum];
+       else if (reg->hooknum == NF_NETDEV_INGRESS) {
 #ifdef CONFIG_NETFILTER_INGRESS
-               if (reg->hooknum == NF_NETDEV_INGRESS) {
-                       BUG_ON(reg->dev == NULL);
-                       nf_hook_list = &reg->dev->nf_hooks_ingress;
-                       net_inc_ingress_queue();
-                       break;
-               }
+               if (reg->dev && dev_net(reg->dev) == net)
+                       hook_list = &reg->dev->nf_hooks_ingress;
 #endif
-               /* Fall through. */
-       default:
-               nf_hook_list = &nf_hooks[reg->pf][reg->hooknum];
-               break;
+       }
+       return hook_list;
+}
+
+struct nf_hook_entry {
+       const struct nf_hook_ops        *orig_ops;
+       struct nf_hook_ops              ops;
+};
+
+int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
+{
+       struct list_head *hook_list;
+       struct nf_hook_entry *entry;
+       struct nf_hook_ops *elem;
+
+       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+
+       entry->orig_ops = reg;
+       entry->ops      = *reg;
+
+       hook_list = nf_find_hook_list(net, reg);
+       if (!hook_list) {
+               kfree(entry);
+               return -ENOENT;
        }
 
-       list_for_each_entry(elem, nf_hook_list, list) {
+       mutex_lock(&nf_hook_mutex);
+       list_for_each_entry(elem, hook_list, list) {
                if (reg->priority < elem->priority)
                        break;
        }
-       list_add_rcu(&reg->list, elem->list.prev);
+       list_add_rcu(&entry->ops.list, elem->list.prev);
        mutex_unlock(&nf_hook_mutex);
+#ifdef CONFIG_NETFILTER_INGRESS
+       if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
+               net_inc_ingress_queue();
+#endif
 #ifdef HAVE_JUMP_LABEL
        static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
 #endif
        return 0;
 }
-EXPORT_SYMBOL(nf_register_hook);
+EXPORT_SYMBOL(nf_register_net_hook);
 
-void nf_unregister_hook(struct nf_hook_ops *reg)
+void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
 {
+       struct list_head *hook_list;
+       struct nf_hook_entry *entry;
+       struct nf_hook_ops *elem;
+
+       hook_list = nf_find_hook_list(net, reg);
+       if (!hook_list)
+               return;
+
        mutex_lock(&nf_hook_mutex);
-       list_del_rcu(&reg->list);
-       mutex_unlock(&nf_hook_mutex);
-       switch (reg->pf) {
-       case NFPROTO_NETDEV:
-#ifdef CONFIG_NETFILTER_INGRESS
-               if (reg->hooknum == NF_NETDEV_INGRESS) {
-                       net_dec_ingress_queue();
+       list_for_each_entry(elem, hook_list, list) {
+               entry = container_of(elem, struct nf_hook_entry, ops);
+               if (entry->orig_ops == reg) {
+                       list_del_rcu(&entry->ops.list);
                        break;
                }
-               break;
-#endif
-       default:
-               break;
        }
+       mutex_unlock(&nf_hook_mutex);
+       if (&elem->list == hook_list) {
+               WARN(1, "nf_unregister_net_hook: hook not found!\n");
+               return;
+       }
+#ifdef CONFIG_NETFILTER_INGRESS
+       if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
+               net_dec_ingress_queue();
+#endif
 #ifdef HAVE_JUMP_LABEL
        static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
 #endif
        synchronize_net();
-       nf_queue_nf_hook_drop(reg);
+       nf_queue_nf_hook_drop(net, &entry->ops);
+       kfree(entry);
+}
+EXPORT_SYMBOL(nf_unregister_net_hook);
+
+int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+                         unsigned int n)
+{
+       unsigned int i;
+       int err = 0;
+
+       for (i = 0; i < n; i++) {
+               err = nf_register_net_hook(net, &reg[i]);
+               if (err)
+                       goto err;
+       }
+       return err;
+
+err:
+       if (i > 0)
+               nf_unregister_net_hooks(net, reg, i);
+       return err;
+}
+EXPORT_SYMBOL(nf_register_net_hooks);
+
+void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+                            unsigned int n)
+{
+       while (n-- > 0)
+               nf_unregister_net_hook(net, &reg[n]);
+}
+EXPORT_SYMBOL(nf_unregister_net_hooks);
+
+static LIST_HEAD(nf_hook_list);
+
+int nf_register_hook(struct nf_hook_ops *reg)
+{
+       struct net *net, *last;
+       int ret;
+
+       rtnl_lock();
+       for_each_net(net) {
+               ret = nf_register_net_hook(net, reg);
+               if (ret && ret != -ENOENT)
+                       goto rollback;
+       }
+       list_add_tail(&reg->list, &nf_hook_list);
+       rtnl_unlock();
+
+       return 0;
+rollback:
+       last = net;
+       for_each_net(net) {
+               if (net == last)
+                       break;
+               nf_unregister_net_hook(net, reg);
+       }
+       rtnl_unlock();
+       return ret;
+}
+EXPORT_SYMBOL(nf_register_hook);
+
+void nf_unregister_hook(struct nf_hook_ops *reg)
+{
+       struct net *net;
+
+       rtnl_lock();
+       list_del(&reg->list);
+       for_each_net(net)
+               nf_unregister_net_hook(net, reg);
+       rtnl_unlock();
 }
 EXPORT_SYMBOL(nf_unregister_hook);
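The two wrappers above keep the old any-namespace semantics alive by replaying a registration across every net; code that actually lives in one namespace can now target it directly. A minimal sketch of a consumer of the per-net API (my_hook_fn, my_hooks and my_net_ops are hypothetical names, not part of this patch), wired through pernet operations so the hooks come and go with the namespace:

static unsigned int my_hook_fn(const struct nf_hook_ops *ops,
			       struct sk_buff *skb,
			       const struct nf_hook_state *state)
{
	return NF_ACCEPT;		/* inspect, mangle or drop here */
}

static struct nf_hook_ops my_hooks[] = {
	{
		.hook	  = my_hook_fn,
		.pf	  = NFPROTO_IPV4,
		.hooknum  = NF_INET_LOCAL_OUT,
		.priority = NF_IP_PRI_FILTER,
	},
};

static int __net_init my_net_init(struct net *net)
{
	return nf_register_net_hooks(net, my_hooks, ARRAY_SIZE(my_hooks));
}

static void __net_exit my_net_exit(struct net *net)
{
	nf_unregister_net_hooks(net, my_hooks, ARRAY_SIZE(my_hooks));
}

static struct pernet_operations my_net_ops = {
	.init = my_net_init,
	.exit = my_net_exit,
};

The module would hand my_net_ops to register_pernet_subsys(); nf_register_hook() stays available for callers that really do want every namespace at once.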
 
@@ -295,8 +398,46 @@ void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
 EXPORT_SYMBOL(nf_nat_decode_session_hook);
 #endif
 
+static int nf_register_hook_list(struct net *net)
+{
+       struct nf_hook_ops *elem;
+       int ret;
+
+       rtnl_lock();
+       list_for_each_entry(elem, &nf_hook_list, list) {
+               ret = nf_register_net_hook(net, elem);
+               if (ret && ret != -ENOENT)
+                       goto out_undo;
+       }
+       rtnl_unlock();
+       return 0;
+
+out_undo:
+       list_for_each_entry_continue_reverse(elem, &nf_hook_list, list)
+               nf_unregister_net_hook(net, elem);
+       rtnl_unlock();
+       return ret;
+}
+
+static void nf_unregister_hook_list(struct net *net)
+{
+       struct nf_hook_ops *elem;
+
+       rtnl_lock();
+       list_for_each_entry(elem, &nf_hook_list, list)
+               nf_unregister_net_hook(net, elem);
+       rtnl_unlock();
+}
+
 static int __net_init netfilter_net_init(struct net *net)
 {
+       int i, h, ret;
+
+       for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) {
+               for (h = 0; h < NF_MAX_HOOKS; h++)
+                       INIT_LIST_HEAD(&net->nf.hooks[i][h]);
+       }
+
 #ifdef CONFIG_PROC_FS
        net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
                                                net->proc_net);
@@ -307,11 +448,16 @@ static int __net_init netfilter_net_init(struct net *net)
                return -ENOMEM;
        }
 #endif
-       return 0;
+       ret = nf_register_hook_list(net);
+       if (ret)
+               remove_proc_entry("netfilter", net->proc_net);
+
+       return ret;
 }
 
 static void __net_exit netfilter_net_exit(struct net *net)
 {
+       nf_unregister_hook_list(net);
        remove_proc_entry("netfilter", net->proc_net);
 }
 
@@ -322,12 +468,7 @@ static struct pernet_operations netfilter_net_ops = {
 
 int __init netfilter_init(void)
 {
-       int i, h, ret;
-
-       for (i = 0; i < ARRAY_SIZE(nf_hooks); i++) {
-               for (h = 0; h < NF_MAX_HOOKS; h++)
-                       INIT_LIST_HEAD(&nf_hooks[i][h]);
-       }
+       int ret;
 
        ret = register_pernet_subsys(&netfilter_net_ops);
        if (ret < 0)
index 5d2b806a862e6834ff6c61aee5c0e0a899bbe4b8..38fbc194b9cb72835c497439713d23f16d99ca8c 100644 (file)
@@ -319,7 +319,13 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
                 * return *ignored=0 i.e. ICMP and NF_DROP
                 */
                sched = rcu_dereference(svc->scheduler);
-               dest = sched->schedule(svc, skb, iph);
+               if (sched) {
+                       /* read svc->sched_data after svc->scheduler */
+                       smp_rmb();
+                       dest = sched->schedule(svc, skb, iph);
+               } else {
+                       dest = NULL;
+               }
                if (!dest) {
                        IP_VS_DBG(1, "p-schedule: no dest found.\n");
                        kfree(param.pe_data);
@@ -467,7 +473,13 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
        }
 
        sched = rcu_dereference(svc->scheduler);
-       dest = sched->schedule(svc, skb, iph);
+       if (sched) {
+               /* read svc->sched_data after svc->scheduler */
+               smp_rmb();
+               dest = sched->schedule(svc, skb, iph);
+       } else {
+               dest = NULL;
+       }
        if (dest == NULL) {
                IP_VS_DBG(1, "Schedule: no dest found.\n");
                return NULL;
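The smp_rmb() in both branches is needed because sched->schedule() reads svc->sched_data, a separate location from the svc->scheduler pointer, so rcu_dereference() alone orders nothing between the two loads. It pairs with the release semantics of rcu_assign_pointer() on the publish side, sketched here (modelled on ip_vs_bind_scheduler(); simplified, not the full function):

static int bind_scheduler_sketch(struct ip_vs_service *svc,
				 struct ip_vs_scheduler *sched)
{
	if (sched->init_service) {
		int ret = sched->init_service(svc); /* writes svc->sched_data */

		if (ret)
			return ret;
	}
	/* release: sched_data is visible before the pointer is */
	rcu_assign_pointer(svc->scheduler, sched);
	return 0;
}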
index 285eae3a145483c48c00493651a46a5d81656845..24c554201a766d175ed90359b6b78617acd37c64 100644 (file)
@@ -842,15 +842,16 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
        __ip_vs_dst_cache_reset(dest);
        spin_unlock_bh(&dest->dst_lock);
 
-       sched = rcu_dereference_protected(svc->scheduler, 1);
        if (add) {
                ip_vs_start_estimator(svc->net, &dest->stats);
                list_add_rcu(&dest->n_list, &svc->destinations);
                svc->num_dests++;
-               if (sched->add_dest)
+               sched = rcu_dereference_protected(svc->scheduler, 1);
+               if (sched && sched->add_dest)
                        sched->add_dest(svc, dest);
        } else {
-               if (sched->upd_dest)
+               sched = rcu_dereference_protected(svc->scheduler, 1);
+               if (sched && sched->upd_dest)
                        sched->upd_dest(svc, dest);
        }
 }
@@ -1084,7 +1085,7 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
                struct ip_vs_scheduler *sched;
 
                sched = rcu_dereference_protected(svc->scheduler, 1);
-               if (sched->del_dest)
+               if (sched && sched->del_dest)
                        sched->del_dest(svc, dest);
        }
 }
@@ -1175,11 +1176,14 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
        ip_vs_use_count_inc();
 
        /* Lookup the scheduler by 'u->sched_name' */
-       sched = ip_vs_scheduler_get(u->sched_name);
-       if (sched == NULL) {
-               pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name);
-               ret = -ENOENT;
-               goto out_err;
+       if (strcmp(u->sched_name, "none")) {
+               sched = ip_vs_scheduler_get(u->sched_name);
+               if (!sched) {
+                       pr_info("Scheduler module ip_vs_%s not found\n",
+                               u->sched_name);
+                       ret = -ENOENT;
+                       goto out_err;
+               }
        }
 
        if (u->pe_name && *u->pe_name) {
@@ -1240,10 +1244,12 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
        spin_lock_init(&svc->stats.lock);
 
        /* Bind the scheduler */
-       ret = ip_vs_bind_scheduler(svc, sched);
-       if (ret)
-               goto out_err;
-       sched = NULL;
+       if (sched) {
+               ret = ip_vs_bind_scheduler(svc, sched);
+               if (ret)
+                       goto out_err;
+               sched = NULL;
+       }
 
        /* Bind the ct retriever */
        RCU_INIT_POINTER(svc->pe, pe);
@@ -1291,17 +1297,20 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
 static int
 ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
 {
-       struct ip_vs_scheduler *sched, *old_sched;
+       struct ip_vs_scheduler *sched = NULL, *old_sched;
        struct ip_vs_pe *pe = NULL, *old_pe = NULL;
        int ret = 0;
 
        /*
         * Lookup the scheduler, by 'u->sched_name'
         */
-       sched = ip_vs_scheduler_get(u->sched_name);
-       if (sched == NULL) {
-               pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name);
-               return -ENOENT;
+       if (strcmp(u->sched_name, "none")) {
+               sched = ip_vs_scheduler_get(u->sched_name);
+               if (!sched) {
+                       pr_info("Scheduler module ip_vs_%s not found\n",
+                               u->sched_name);
+                       return -ENOENT;
+               }
        }
        old_sched = sched;
 
@@ -1329,14 +1338,20 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
 
        old_sched = rcu_dereference_protected(svc->scheduler, 1);
        if (sched != old_sched) {
+               if (old_sched) {
+                       ip_vs_unbind_scheduler(svc, old_sched);
+                       RCU_INIT_POINTER(svc->scheduler, NULL);
+                       /* Wait all svc->sched_data users */
+                       synchronize_rcu();
+               }
                /* Bind the new scheduler */
-               ret = ip_vs_bind_scheduler(svc, sched);
-               if (ret) {
-                       old_sched = sched;
-                       goto out;
+               if (sched) {
+                       ret = ip_vs_bind_scheduler(svc, sched);
+                       if (ret) {
+                               ip_vs_scheduler_put(sched);
+                               goto out;
+                       }
                }
-               /* Unbind the old scheduler on success */
-               ip_vs_unbind_scheduler(svc, old_sched);
        }
 
        /*
@@ -1982,6 +1997,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
                const struct ip_vs_iter *iter = seq->private;
                const struct ip_vs_dest *dest;
                struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
+               char *sched_name = sched ? sched->name : "none";
 
                if (iter->table == ip_vs_svc_table) {
 #ifdef CONFIG_IP_VS_IPV6
@@ -1990,18 +2006,18 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
                                           ip_vs_proto_name(svc->protocol),
                                           &svc->addr.in6,
                                           ntohs(svc->port),
-                                          sched->name);
+                                          sched_name);
                        else
 #endif
                                seq_printf(seq, "%s  %08X:%04X %s %s ",
                                           ip_vs_proto_name(svc->protocol),
                                           ntohl(svc->addr.ip),
                                           ntohs(svc->port),
-                                          sched->name,
+                                          sched_name,
                                           (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
                } else {
                        seq_printf(seq, "FWM  %08X %s %s",
-                                  svc->fwmark, sched->name,
+                                  svc->fwmark, sched_name,
                                   (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
                }
 
@@ -2427,13 +2443,15 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
 {
        struct ip_vs_scheduler *sched;
        struct ip_vs_kstats kstats;
+       char *sched_name;
 
        sched = rcu_dereference_protected(src->scheduler, 1);
+       sched_name = sched ? sched->name : "none";
        dst->protocol = src->protocol;
        dst->addr = src->addr.ip;
        dst->port = src->port;
        dst->fwmark = src->fwmark;
-       strlcpy(dst->sched_name, sched->name, sizeof(dst->sched_name));
+       strlcpy(dst->sched_name, sched_name, sizeof(dst->sched_name));
        dst->flags = src->flags;
        dst->timeout = src->timeout / HZ;
        dst->netmask = src->netmask;
@@ -2892,6 +2910,7 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
        struct ip_vs_flags flags = { .flags = svc->flags,
                                     .mask = ~0 };
        struct ip_vs_kstats kstats;
+       char *sched_name;
 
        nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE);
        if (!nl_service)
@@ -2910,8 +2929,9 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
        }
 
        sched = rcu_dereference_protected(svc->scheduler, 1);
+       sched_name = sched ? sched->name : "none";
        pe = rcu_dereference_protected(svc->pe, 1);
-       if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched->name) ||
+       if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched_name) ||
            (pe && nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, pe->name)) ||
            nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) ||
            nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
index 199760c71f3995f88dbe73b159906cf7840cdded..a2ff7d746ebf40232cb2bb01a93c105623c33c7e 100644 (file)
@@ -74,7 +74,7 @@ void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
 
        if (sched->done_service)
                sched->done_service(svc);
-       /* svc->scheduler can not be set to NULL */
+       /* svc->scheduler can be set to NULL only by caller */
 }
 
 
@@ -137,7 +137,7 @@ struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name)
 
 void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
 {
-       if (scheduler && scheduler->module)
+       if (scheduler)
                module_put(scheduler->module);
 }
 
@@ -147,21 +147,21 @@ void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
 
 void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg)
 {
-       struct ip_vs_scheduler *sched;
+       struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
+       char *sched_name = sched ? sched->name : "none";
 
-       sched = rcu_dereference(svc->scheduler);
        if (svc->fwmark) {
                IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n",
-                            sched->name, svc->fwmark, svc->fwmark, msg);
+                            sched_name, svc->fwmark, svc->fwmark, msg);
 #ifdef CONFIG_IP_VS_IPV6
        } else if (svc->af == AF_INET6) {
                IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n",
-                            sched->name, ip_vs_proto_name(svc->protocol),
+                            sched_name, ip_vs_proto_name(svc->protocol),
                             &svc->addr.in6, ntohs(svc->port), msg);
 #endif
        } else {
                IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n",
-                            sched->name, ip_vs_proto_name(svc->protocol),
+                            sched_name, ip_vs_proto_name(svc->protocol),
                             &svc->addr.ip, ntohs(svc->port), msg);
        }
 }
index b08ba9538d121bad95ab9aa579b2b37523f57dc2..d99ad93eb85508594cc5f6919a0b9eb005f19b33 100644 (file)
@@ -612,7 +612,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
                        pkts = atomic_add_return(1, &cp->in_pkts);
                else
                        pkts = sysctl_sync_threshold(ipvs);
-               ip_vs_sync_conn(net, cp->control, pkts);
+               ip_vs_sync_conn(net, cp, pkts);
        }
 }
 
index bf66a8657a5f7c7e1a2d3adbde3a05245c598249..258a0b0e82a293db38533a114c5c3a0bb2c03b9f 100644 (file)
@@ -130,7 +130,6 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
 
        memset(&fl4, 0, sizeof(fl4));
        fl4.daddr = daddr;
-       fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
        fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
                           FLOWI_FLAG_KNOWN_NH : 0;
 
@@ -505,6 +504,13 @@ err_put:
        return -1;
 
 err_unreach:
+       /* The ip6_link_failure function requires the dev field to be set
+        * in order to get the net (further for the sake of fwmark
+        * reflection).
+        */
+       if (!skb->dev)
+               skb->dev = skb_dst(skb)->dev;
+
        dst_link_failure(skb);
        return -1;
 }
@@ -523,10 +529,27 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
        if (ret == NF_ACCEPT) {
                nf_reset(skb);
                skb_forward_csum(skb);
+               if (!skb->sk)
+                       skb_sender_cpu_clear(skb);
        }
        return ret;
 }
 
+/* In the event of a remote destination, it's possible that we would have
+ * matches against an old socket (particularly a TIME-WAIT socket). This
+ * causes havoc down the line (ip_local_out et al. expect regular sockets
+ * and invalid memory accesses will happen) so simply drop the association
+ * in this case.
+ */
+static inline void ip_vs_drop_early_demux_sk(struct sk_buff *skb)
+{
+       /* If dev is set, the packet came from the LOCAL_IN callback and
+        * not from a local TCP socket.
+        */
+       if (skb->dev)
+               skb_orphan(skb);
+}
+
 /* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */
 static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
                                         struct ip_vs_conn *cp, int local)
@@ -538,12 +561,23 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
                ip_vs_notrack(skb);
        else
                ip_vs_update_conntrack(skb, cp, 1);
+
+       /* Remove the early_demux association unless it's bound for the
+        * exact same port and address on this host after translation.
+        */
+       if (!local || cp->vport != cp->dport ||
+           !ip_vs_addr_equal(cp->af, &cp->vaddr, &cp->daddr))
+               ip_vs_drop_early_demux_sk(skb);
+
        if (!local) {
                skb_forward_csum(skb);
+               if (!skb->sk)
+                       skb_sender_cpu_clear(skb);
                NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
                        NULL, skb_dst(skb)->dev, dst_output_sk);
        } else
                ret = NF_ACCEPT;
+
        return ret;
 }
 
@@ -557,7 +591,10 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
        if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
                ip_vs_notrack(skb);
        if (!local) {
+               ip_vs_drop_early_demux_sk(skb);
                skb_forward_csum(skb);
+               if (!skb->sk)
+                       skb_sender_cpu_clear(skb);
                NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
                        NULL, skb_dst(skb)->dev, dst_output_sk);
        } else
@@ -845,6 +882,8 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
        struct ipv6hdr *old_ipv6h = NULL;
 #endif
 
+       ip_vs_drop_early_demux_sk(skb);
+
        if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
                new_skb = skb_realloc_headroom(skb, max_headroom);
                if (!new_skb)
index 13fad8668f83d7ea4c7862b4c24d1234475ac37d..651039ad1681db0434cff21275f1bcbe3f8464bc 100644 (file)
@@ -287,6 +287,46 @@ static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
        spin_unlock(&pcpu->lock);
 }
 
+/* Released via destroy_conntrack() */
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
+{
+       struct nf_conn *tmpl;
+
+       tmpl = kzalloc(sizeof(*tmpl), flags);
+       if (tmpl == NULL)
+               return NULL;
+
+       tmpl->status = IPS_TEMPLATE;
+       write_pnet(&tmpl->ct_net, net);
+
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+       if (zone) {
+               struct nf_conntrack_zone *nf_ct_zone;
+
+               nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, flags);
+               if (!nf_ct_zone)
+                       goto out_free;
+               nf_ct_zone->id = zone;
+       }
+#endif
+       atomic_set(&tmpl->ct_general.use, 0);
+
+       return tmpl;
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+out_free:
+       kfree(tmpl);
+       return NULL;
+#endif
+}
+EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
+
+static void nf_ct_tmpl_free(struct nf_conn *tmpl)
+{
+       nf_ct_ext_destroy(tmpl);
+       nf_ct_ext_free(tmpl);
+       kfree(tmpl);
+}
+
 static void
 destroy_conntrack(struct nf_conntrack *nfct)
 {
@@ -298,6 +338,10 @@ destroy_conntrack(struct nf_conntrack *nfct)
        NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
        NF_CT_ASSERT(!timer_pending(&ct->timeout));
 
+       if (unlikely(nf_ct_is_template(ct))) {
+               nf_ct_tmpl_free(ct);
+               return;
+       }
        rcu_read_lock();
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto && l4proto->destroy)
@@ -540,28 +584,6 @@ out:
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
 
-/* deletion from this larval template list happens via nf_ct_put() */
-void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl)
-{
-       struct ct_pcpu *pcpu;
-
-       __set_bit(IPS_TEMPLATE_BIT, &tmpl->status);
-       __set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
-       nf_conntrack_get(&tmpl->ct_general);
-
-       /* add this conntrack to the (per cpu) tmpl list */
-       local_bh_disable();
-       tmpl->cpu = smp_processor_id();
-       pcpu = per_cpu_ptr(nf_ct_net(tmpl)->ct.pcpu_lists, tmpl->cpu);
-
-       spin_lock(&pcpu->lock);
-       /* Overload tuple linked list to put us in template list. */
-       hlist_nulls_add_head_rcu(&tmpl->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
-                                &pcpu->tmpl);
-       spin_unlock_bh(&pcpu->lock);
-}
-EXPORT_SYMBOL_GPL(nf_conntrack_tmpl_insert);
-
 /* Confirm a connection given skb; places it in hash table */
 int
 __nf_conntrack_confirm(struct sk_buff *skb)
@@ -1751,7 +1773,6 @@ int nf_conntrack_init_net(struct net *net)
                spin_lock_init(&pcpu->lock);
                INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
                INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
-               INIT_HLIST_NULLS_HEAD(&pcpu->tmpl, TEMPLATE_NULLS_VAL);
        }
 
        net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
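With the per-cpu template list gone (see the removal of nf_conntrack_tmpl_insert() above), a template is an ordinary allocation whose IPS_TEMPLATE status bit steers destroy_conntrack() into nf_ct_tmpl_free(). In outline, the lifecycle the converted users (synproxy, xt_CT) follow; a hedged sketch, error handling elided:

	struct nf_conn *tmpl;

	tmpl = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;
	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	nf_conntrack_get(&tmpl->ct_general);	/* use count: 0 -> 1 */
	/* ... point skbs at the template while it is in service ... */
	nf_ct_put(tmpl);	/* final put: destroy_conntrack() frees it */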
index 7a17070c5dabb979c2cb90b7b62f35a79cdc92c0..b45a4223cb058a47ae2863a4166cd5085e587ca6 100644 (file)
@@ -219,7 +219,8 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
                        a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
        }
 
-       return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
+       return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
+              nf_ct_zone(a->master) == nf_ct_zone(b->master);
 }
 
 static inline int expect_matches(const struct nf_conntrack_expect *a,
index d1c23940a86ad96cddbf1aefe0747c43fddca920..6b8b0abbfab482280ae6a318f8bc58260e0b21c8 100644 (file)
@@ -2995,11 +2995,6 @@ ctnetlink_create_expect(struct net *net, u16 zone,
        }
 
        err = nf_ct_expect_related_report(exp, portid, report);
-       if (err < 0)
-               goto err_exp;
-
-       return 0;
-err_exp:
        nf_ct_expect_put(exp);
 err_ct:
        nf_ct_put(ct);
index b45da90fad329d9605b839a709b10d400e682a7e..67197731eb6835ff944c688afb942b90c9dc63a4 100644 (file)
@@ -42,6 +42,8 @@ static const char *const sctp_conntrack_names[] = {
        "SHUTDOWN_SENT",
        "SHUTDOWN_RECD",
        "SHUTDOWN_ACK_SENT",
+       "HEARTBEAT_SENT",
+       "HEARTBEAT_ACKED",
 };
 
 #define SECS  * HZ
@@ -57,6 +59,8 @@ static unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] __read_mostly = {
        [SCTP_CONNTRACK_SHUTDOWN_SENT]          = 300 SECS / 1000,
        [SCTP_CONNTRACK_SHUTDOWN_RECD]          = 300 SECS / 1000,
        [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT]      = 3 SECS,
+       [SCTP_CONNTRACK_HEARTBEAT_SENT]         = 30 SECS,
+       [SCTP_CONNTRACK_HEARTBEAT_ACKED]        = 210 SECS,
 };
 
 #define sNO SCTP_CONNTRACK_NONE
@@ -67,6 +71,8 @@ static unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] __read_mostly = {
 #define        sSS SCTP_CONNTRACK_SHUTDOWN_SENT
 #define        sSR SCTP_CONNTRACK_SHUTDOWN_RECD
 #define        sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT
+#define        sHS SCTP_CONNTRACK_HEARTBEAT_SENT
+#define        sHA SCTP_CONNTRACK_HEARTBEAT_ACKED
 #define        sIV SCTP_CONNTRACK_MAX
 
 /*
@@ -88,6 +94,10 @@ SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite
                    to that of the SHUTDOWN chunk.
 CLOSED            - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
                    the SHUTDOWN chunk. Connection is closed.
+HEARTBEAT_SENT    - We have seen a HEARTBEAT in a new flow.
+HEARTBEAT_ACKED   - We have seen a HEARTBEAT-ACK in the direction opposite to
+                   that of the HEARTBEAT chunk. Secondary connection is
+                   established.
 */
 
 /* TODO
@@ -97,36 +107,40 @@ CLOSED            - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
  - Check the error type in the reply dir before transitioning from
 cookie echoed to closed.
  - Sec 5.2.4 of RFC 2960
- - Multi Homing support.
+ - Full Multi Homing support.
 */
 
 /* SCTP conntrack state transitions */
-static const u8 sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = {
+static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
        {
 /*     ORIGINAL        */
-/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */
-/* init         */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA},
-/* init_ack     */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},
-/* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
-/* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA},
-/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA},
-/* error        */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't have Stale cookie*/
-/* cookie_echo  */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA},/* 5.2.4 - Big TODO */
-/* cookie_ack   */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in orig dir */
-/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL}
+/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
+/* init         */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA},
+/* init_ack     */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},
+/* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
+/* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS},
+/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA, sHA},
+/* error        */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't have Stale cookie*/
+/* cookie_echo  */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* 5.2.4 - Big TODO */
+/* cookie_ack   */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't come in orig dir */
+/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL, sHA},
+/* heartbeat    */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA},
+/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA}
        },
        {
 /*     REPLY   */
-/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */
-/* init         */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* INIT in sCL Big TODO */
-/* init_ack     */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},
-/* abort        */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
-/* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA},
-/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA},
-/* error        */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA},
-/* cookie_echo  */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in reply dir */
-/* cookie_ack   */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA},
-/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL}
+/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
+/* init         */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* INIT in sCL Big TODO */
+/* init_ack     */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},
+/* abort        */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL},
+/* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR},
+/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA},
+/* error        */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV, sHA},
+/* cookie_echo  */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* Can't come in reply dir */
+/* cookie_ack   */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV, sHA},
+/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV, sHA},
+/* heartbeat    */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA},
+/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA}
        }
 };
 
@@ -278,9 +292,16 @@ static int sctp_new_state(enum ip_conntrack_dir dir,
                pr_debug("SCTP_CID_SHUTDOWN_COMPLETE\n");
                i = 8;
                break;
+       case SCTP_CID_HEARTBEAT:
+               pr_debug("SCTP_CID_HEARTBEAT\n");
+               i = 9;
+               break;
+       case SCTP_CID_HEARTBEAT_ACK:
+               pr_debug("SCTP_CID_HEARTBEAT_ACK\n");
+               i = 10;
+               break;
        default:
-               /* Other chunks like DATA, SACK, HEARTBEAT and
-               its ACK do not cause a change in state */
+               /* Other chunks like DATA or SACK do not change the state */
                pr_debug("Unknown chunk type, Will stay in %s\n",
                         sctp_conntrack_names[cur_state]);
                return cur_state;
@@ -329,6 +350,8 @@ static int sctp_packet(struct nf_conn *ct,
            !test_bit(SCTP_CID_COOKIE_ECHO, map) &&
            !test_bit(SCTP_CID_ABORT, map) &&
            !test_bit(SCTP_CID_SHUTDOWN_ACK, map) &&
+           !test_bit(SCTP_CID_HEARTBEAT, map) &&
+           !test_bit(SCTP_CID_HEARTBEAT_ACK, map) &&
            sh->vtag != ct->proto.sctp.vtag[dir]) {
                pr_debug("Verification tag check failed\n");
                goto out;
@@ -357,6 +380,16 @@ static int sctp_packet(struct nf_conn *ct,
                        /* Sec 8.5.1 (D) */
                        if (sh->vtag != ct->proto.sctp.vtag[dir])
                                goto out_unlock;
+               } else if (sch->type == SCTP_CID_HEARTBEAT ||
+                          sch->type == SCTP_CID_HEARTBEAT_ACK) {
+                       if (ct->proto.sctp.vtag[dir] == 0) {
+                               pr_debug("Setting vtag %x for dir %d\n",
+                                        sh->vtag, dir);
+                               ct->proto.sctp.vtag[dir] = sh->vtag;
+                       } else if (sh->vtag != ct->proto.sctp.vtag[dir]) {
+                               pr_debug("Verification tag check failed\n");
+                               goto out_unlock;
+                       }
                }
 
                old_state = ct->proto.sctp.state;
@@ -466,6 +499,10 @@ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
                                /* Sec 8.5.1 (A) */
                                return false;
                        }
+               } else if (sch->type == SCTP_CID_HEARTBEAT) {
+                       pr_debug("Setting vtag %x for secondary conntrack\n",
+                                sh->vtag);
+                       ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;
                }
                /* If it is a shutdown ack OOTB packet, we expect a return
                   shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */
@@ -610,6 +647,8 @@ sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
        [CTA_TIMEOUT_SCTP_SHUTDOWN_SENT]        = { .type = NLA_U32 },
        [CTA_TIMEOUT_SCTP_SHUTDOWN_RECD]        = { .type = NLA_U32 },
        [CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT]    = { .type = NLA_U32 },
+       [CTA_TIMEOUT_SCTP_HEARTBEAT_SENT]       = { .type = NLA_U32 },
+       [CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED]      = { .type = NLA_U32 },
 };
 #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
 
@@ -658,6 +697,18 @@ static struct ctl_table sctp_sysctl_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
+       {
+               .procname       = "nf_conntrack_sctp_timeout_heartbeat_sent",
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+       },
+       {
+               .procname       = "nf_conntrack_sctp_timeout_heartbeat_acked",
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+       },
        { }
 };
 
@@ -730,6 +781,8 @@ static int sctp_kmemdup_sysctl_table(struct nf_proto_net *pn,
        pn->ctl_table[4].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT];
        pn->ctl_table[5].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD];
        pn->ctl_table[6].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT];
+       pn->ctl_table[7].data = &sn->timeouts[SCTP_CONNTRACK_HEARTBEAT_SENT];
+       pn->ctl_table[8].data = &sn->timeouts[SCTP_CONNTRACK_HEARTBEAT_ACKED];
 #endif
        return 0;
 }
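Net effect of the two added rows: a HEARTBEAT on a fresh flow, as sent by a multihomed peer probing a secondary path, creates a conntrack in HEARTBEAT_SENT (30s timeout), and the HEARTBEAT-ACK seen in the reply direction promotes it to HEARTBEAT_ACKED (210s). A standalone walk of exactly that pair of transitions, with the two rows copied verbatim from the table above (illustrative only):

#include <stdio.h>

enum { sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sIV };

int main(void)
{
	/* ORIGINAL/heartbeat (index 9) and REPLY/heartbeat_ack (index 10) */
	int orig_hb[]   = {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA};
	int reply_hba[] = {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA};
	int state = sNO;

	state = orig_hb[state];		/* sNO -> sHS, HEARTBEAT_SENT */
	printf("after HEARTBEAT:     state=%d (sHS=%d)\n", state, sHS);
	state = reply_hba[state];	/* sHS -> sHA, HEARTBEAT_ACKED */
	printf("after HEARTBEAT-ACK: state=%d (sHA=%d)\n", state, sHA);
	return 0;
}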
index 399210693c2a8bfac527e85ea6285757c70c1571..065522564ac6a032ce1ece83be62438f7d8b0636 100644 (file)
@@ -19,7 +19,7 @@ unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb,
 /* nf_queue.c */
 int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem,
             struct nf_hook_state *state, unsigned int queuenum);
-void nf_queue_nf_hook_drop(struct nf_hook_ops *ops);
+void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops);
 int __init netfilter_queue_init(void);
 
 /* nf_log.c */
index 8a8b2abc35ffdeacb5a61e1a801c92f91998bc3d..96777f9a9350b3a684ae56b50c77400fde3ad305 100644 (file)
@@ -105,21 +105,15 @@ bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
 }
 EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
 
-void nf_queue_nf_hook_drop(struct nf_hook_ops *ops)
+void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops)
 {
        const struct nf_queue_handler *qh;
-       struct net *net;
 
-       rtnl_lock();
        rcu_read_lock();
        qh = rcu_dereference(queue_handler);
-       if (qh) {
-               for_each_net(net) {
-                       qh->nf_hook_drop(net, ops);
-               }
-       }
+       if (qh)
+               qh->nf_hook_drop(net, ops);
        rcu_read_unlock();
-       rtnl_unlock();
 }
 
 /*
index 789feeae6c44a82b44e040ae2a6bec6f26cbe450..71f1e9fdfa18fb9b1f2f2730ca21af42dad98eea 100644 (file)
@@ -349,12 +349,9 @@ static void __net_exit synproxy_proc_exit(struct net *net)
 static int __net_init synproxy_net_init(struct net *net)
 {
        struct synproxy_net *snet = synproxy_pernet(net);
-       struct nf_conntrack_tuple t;
        struct nf_conn *ct;
        int err = -ENOMEM;
 
-       memset(&t, 0, sizeof(t));
-       ct = nf_conntrack_alloc(net, 0, &t, &t, GFP_KERNEL);
-       if (IS_ERR(ct)) {
-               err = PTR_ERR(ct);
+       ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL);
+       if (!ct) {
                goto err1;
@@ -365,7 +363,8 @@ static int __net_init synproxy_net_init(struct net *net)
        if (!nfct_synproxy_ext_add(ct))
                goto err2;
 
-       nf_conntrack_tmpl_insert(net, ct);
+       __set_bit(IPS_CONFIRMED_BIT, &ct->status);
+       nf_conntrack_get(&ct->ct_general);
        snet->tmpl = ct;
 
        snet->stats = alloc_percpu(struct synproxy_stats);
index cfe636808541a07f2c4b69c29737d88a167efe2f..4a41eb92bcc0ab934f12305ad64a18c8804a52b2 100644 (file)
@@ -130,20 +130,24 @@ static void nft_trans_destroy(struct nft_trans *trans)
 int nft_register_basechain(struct nft_base_chain *basechain,
                           unsigned int hook_nops)
 {
+       struct net *net = read_pnet(&basechain->pnet);
+
        if (basechain->flags & NFT_BASECHAIN_DISABLED)
                return 0;
 
-       return nf_register_hooks(basechain->ops, hook_nops);
+       return nf_register_net_hooks(net, basechain->ops, hook_nops);
 }
 EXPORT_SYMBOL_GPL(nft_register_basechain);
 
 void nft_unregister_basechain(struct nft_base_chain *basechain,
                              unsigned int hook_nops)
 {
+       struct net *net = read_pnet(&basechain->pnet);
+
        if (basechain->flags & NFT_BASECHAIN_DISABLED)
                return;
 
-       nf_unregister_hooks(basechain->ops, hook_nops);
+       nf_unregister_net_hooks(net, basechain->ops, hook_nops);
 }
 EXPORT_SYMBOL_GPL(nft_unregister_basechain);
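read_pnet() recovers the namespace stamped on the base chain at creation time, so (un)registration can reach the right per-net hook lists without a struct net argument. The matching write side is a one-liner at chain setup; a sketch under the assumption that it runs where the base chain is allocated:

	/* at base chain creation: record the owning namespace so that
	 * nft_register_basechain() can read it back via read_pnet()
	 */
	write_pnet(&basechain->pnet, net);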
 
index f77bad46ac683bae9797846ce15f31ded95d00f1..05d0b03530f6b21418d69c0c0f51916228d26b94 100644 (file)
@@ -114,7 +114,6 @@ unsigned int
 nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
 {
        const struct nft_chain *chain = ops->priv, *basechain = chain;
-       const struct net *chain_net = read_pnet(&nft_base_chain(basechain)->pnet);
        const struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
        const struct nft_rule *rule;
        const struct nft_expr *expr, *last;
@@ -125,10 +124,6 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
        int rulenum;
        unsigned int gencursor = nft_genmask_cur(net);
 
-       /* Ignore chains that are not for the current network namespace */
-       if (!net_eq(net, chain_net))
-               return NF_ACCEPT;
-
 do_chain:
        rulenum = 0;
        rule = list_entry(&chain->rules, struct nft_rule, list);
index 52561e1c31e26933dd654f095663c0f0a633f007..cb2f13ebb5a66cdf5fd9498ddfe8949f38741803 100644 (file)
@@ -166,11 +166,13 @@ void nft_meta_get_eval(const struct nft_expr *expr,
                        goto err;
                *dest = out->group;
                break;
+#ifdef CONFIG_CGROUP_NET_CLASSID
        case NFT_META_CGROUP:
                if (skb->sk == NULL || !sk_fullsock(skb->sk))
                        goto err;
                *dest = skb->sk->sk_classid;
                break;
+#endif
        default:
                WARN_ON(1);
                goto err;
@@ -246,7 +248,9 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
        case NFT_META_CPU:
        case NFT_META_IIFGROUP:
        case NFT_META_OIFGROUP:
+#ifdef CONFIG_CGROUP_NET_CLASSID
        case NFT_META_CGROUP:
+#endif
                len = sizeof(u32);
                break;
        case NFT_META_IIFNAME:
index d324fe71260c9f24b02507e4f429c0ba1e328d98..9b42b5ea6dcd68c8398c501aa5af81b6dfa83ae8 100644 (file)
@@ -67,9 +67,6 @@ static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
        [NFPROTO_IPV6]   = "ip6",
 };
 
-/* Allow this many total (re)entries. */
-static const unsigned int xt_jumpstack_multiplier = 2;
-
 /* Registration hooks for targets. */
 int xt_register_target(struct xt_target *target)
 {
@@ -688,8 +685,6 @@ void xt_free_table_info(struct xt_table_info *info)
                kvfree(info->jumpstack);
        }
 
-       free_percpu(info->stackptr);
-
        kvfree(info);
 }
 EXPORT_SYMBOL(xt_free_table_info);
@@ -732,15 +727,14 @@ EXPORT_SYMBOL_GPL(xt_compat_unlock);
 DEFINE_PER_CPU(seqcount_t, xt_recseq);
 EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
 
+struct static_key xt_tee_enabled __read_mostly;
+EXPORT_SYMBOL_GPL(xt_tee_enabled);
+
 static int xt_jumpstack_alloc(struct xt_table_info *i)
 {
        unsigned int size;
        int cpu;
 
-       i->stackptr = alloc_percpu(unsigned int);
-       if (i->stackptr == NULL)
-               return -ENOMEM;
-
        size = sizeof(void **) * nr_cpu_ids;
        if (size > PAGE_SIZE)
                i->jumpstack = vzalloc(size);
@@ -749,8 +743,21 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
        if (i->jumpstack == NULL)
                return -ENOMEM;
 
-       i->stacksize *= xt_jumpstack_multiplier;
-       size = sizeof(void *) * i->stacksize;
+       /* ruleset without jumps -- no stack needed */
+       if (i->stacksize == 0)
+               return 0;
+
+       /* Jumpstack needs to be able to record two full callchains, one
+        * from the first rule set traversal, plus one table reentrancy
+        * via -j TEE without clobbering the callchain that brought us to
+        * TEE target.
+        *
+        * This is done by allocating two jumpstacks per cpu, on reentry
+        * the upper half of the stack is used.
+        *
+        * see the jumpstack setup in ipt_do_table() for more details.
+        */
+       size = sizeof(void *) * i->stacksize * 2u;
        for_each_possible_cpu(cpu) {
                if (size > PAGE_SIZE)
                        i->jumpstack[cpu] = vmalloc_node(size,
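The "see ipt_do_table()" pointer resolves to a two-line setup, sketched below: a TEE duplicate traverses with nf_skb_duplicated set, so it indexes into the upper half of the doubled stack instead of clobbering the outer callchain (simplified consumer-side sketch, not the full function):

	void **jumpstack = (void **)private->jumpstack[smp_processor_id()];

	if (static_key_false(&xt_tee_enabled))
		jumpstack += private->stacksize *
			     __this_cpu_read(nf_skb_duplicated);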
index 75747aecdebe6344ccbfcf178c299be013d8f763..c6630030c9121c7af27a3052ad776cd6646eb601 100644 (file)
@@ -184,7 +184,6 @@ out:
 static int xt_ct_tg_check(const struct xt_tgchk_param *par,
                          struct xt_ct_target_info_v1 *info)
 {
-       struct nf_conntrack_tuple t;
        struct nf_conn *ct;
        int ret = -EOPNOTSUPP;
 
@@ -202,8 +201,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
        if (ret < 0)
                goto err1;
 
-       memset(&t, 0, sizeof(t));
-       ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL);
+       ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL);
-       ret = PTR_ERR(ct);
-       if (IS_ERR(ct))
+       ret = -ENOMEM;
+       if (!ct)
                goto err2;
@@ -227,8 +225,8 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
                if (ret < 0)
                        goto err3;
        }
-
-       nf_conntrack_tmpl_insert(par->net, ct);
+       __set_bit(IPS_CONFIRMED_BIT, &ct->status);
+       nf_conntrack_get(&ct->ct_general);
 out:
        info->ct = ct;
        return 0;
index f407ebc13481ae5caa0f634db643d034c7af7e6a..29d2c31f406ca585d5f0eb1f08bcaf26d8364053 100644 (file)
@@ -126,6 +126,7 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
                goto out;
        }
 
+       sysfs_attr_init(&info->timer->attr.attr);
        info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
        if (!info->timer->attr.attr.name) {
                ret = -ENOMEM;
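sysfs_attr_init() matters here because the attribute is created at runtime: it gives the attribute its own lockdep key, avoiding the "key not in .data" complaint that dynamically allocated sysfs attributes otherwise trigger on CONFIG_DEBUG_LOCK_ALLOC kernels. The general pattern, with my_show as a hypothetical handler:

	struct device_attribute *attr;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;
	sysfs_attr_init(&attr->attr);	/* must precede registration */
	attr->attr.name = kstrdup("my_attr", GFP_KERNEL);
	attr->attr.mode = 0444;
	attr->show = my_show;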
index a747eb475b68e174db6a0f3ebe41de8ec5ca7d61..c5d6556dbc5e407cffca198ac5fe66b97a0cb908 100644 (file)
@@ -37,7 +37,6 @@ struct xt_tee_priv {
 };
 
 static const union nf_inet_addr tee_zero_address;
-static DEFINE_PER_CPU(bool, tee_active);
 
 static struct net *pick_net(struct sk_buff *skb)
 {
@@ -88,7 +87,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
        const struct xt_tee_tginfo *info = par->targinfo;
        struct iphdr *iph;
 
-       if (__this_cpu_read(tee_active))
+       if (__this_cpu_read(nf_skb_duplicated))
                return XT_CONTINUE;
        /*
         * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
@@ -125,9 +124,9 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
        ip_send_check(iph);
 
        if (tee_tg_route4(skb, info)) {
-               __this_cpu_write(tee_active, true);
+               __this_cpu_write(nf_skb_duplicated, true);
                ip_local_out(skb);
-               __this_cpu_write(tee_active, false);
+               __this_cpu_write(nf_skb_duplicated, false);
        } else {
                kfree_skb(skb);
        }
@@ -170,7 +169,7 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
        const struct xt_tee_tginfo *info = par->targinfo;
 
-       if (__this_cpu_read(tee_active))
+       if (__this_cpu_read(nf_skb_duplicated))
                return XT_CONTINUE;
        skb = pskb_copy(skb, GFP_ATOMIC);
        if (skb == NULL)
@@ -188,9 +187,9 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
                --iph->hop_limit;
        }
        if (tee_tg_route6(skb, info)) {
-               __this_cpu_write(tee_active, true);
+               __this_cpu_write(nf_skb_duplicated, true);
                ip6_local_out(skb);
-               __this_cpu_write(tee_active, false);
+               __this_cpu_write(nf_skb_duplicated, false);
        } else {
                kfree_skb(skb);
        }
@@ -252,6 +251,7 @@ static int tee_tg_check(const struct xt_tgchk_param *par)
        } else
                info->priv = NULL;
 
+       static_key_slow_inc(&xt_tee_enabled);
        return 0;
 }
 
@@ -263,6 +263,7 @@ static void tee_tg_destroy(const struct xt_tgdtor_param *par)
                unregister_netdevice_notifier(&info->priv->notifier);
                kfree(info->priv);
        }
+       static_key_slow_dec(&xt_tee_enabled);
 }
 
 static struct xt_target tee_tg_reg[] __read_mostly = {
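The same guard pattern in miniature (my_dup_tg is a hypothetical target, shaped after tee_tg4/tee_tg6 above): the duplicate is emitted with the per-cpu flag raised, so if it loops back into a TEE rule the match degrades to a plain XT_CONTINUE instead of recursing:

static unsigned int my_dup_tg(struct sk_buff *skb,
			      const struct xt_action_param *par)
{
	struct sk_buff *copy;

	if (__this_cpu_read(nf_skb_duplicated))
		return XT_CONTINUE;	/* already working on a duplicate */

	copy = pskb_copy(skb, GFP_ATOMIC);
	if (copy) {
		__this_cpu_write(nf_skb_duplicated, true);
		ip_local_out(copy);
		__this_cpu_write(nf_skb_duplicated, false);
	}
	return XT_CONTINUE;
}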
index cca96cec1b689fcd104e273a64db6eda44171beb..d0c96c5ae29aa84057e93a5d9796014dca4c52e8 100644 (file)
@@ -272,8 +272,7 @@ tproxy_handle_time_wait4(struct sk_buff *skb, __be32 laddr, __be16 lport,
                                            hp->source, lport ? lport : hp->dest,
                                            skb->dev, NFT_LOOKUP_LISTENER);
                if (sk2) {
-                       inet_twsk_deschedule(inet_twsk(sk));
-                       inet_twsk_put(inet_twsk(sk));
+                       inet_twsk_deschedule_put(inet_twsk(sk));
                        sk = sk2;
                }
        }
@@ -437,8 +436,7 @@ tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
                                            tgi->lport ? tgi->lport : hp->dest,
                                            skb->dev, NFT_LOOKUP_LISTENER);
                if (sk2) {
-                       inet_twsk_deschedule(inet_twsk(sk));
-                       inet_twsk_put(inet_twsk(sk));
+                       inet_twsk_deschedule_put(inet_twsk(sk));
                        sk = sk2;
                }
        }
index 9a0ae7172f9271851f7a7c036e1c1980f45e5255..d8e2e3918ce2fd95637c4cba8bfc4886feb91ea6 100644 (file)
@@ -357,25 +357,52 @@ err1:
        return NULL;
 }
 
+
+static void
+__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
+                  unsigned int order)
+{
+       struct netlink_sock *nlk = nlk_sk(sk);
+       struct sk_buff_head *queue;
+       struct netlink_ring *ring;
+
+       queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
+       ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
+
+       spin_lock_bh(&queue->lock);
+
+       ring->frame_max         = req->nm_frame_nr - 1;
+       ring->head              = 0;
+       ring->frame_size        = req->nm_frame_size;
+       ring->pg_vec_pages      = req->nm_block_size / PAGE_SIZE;
+
+       swap(ring->pg_vec_len, req->nm_block_nr);
+       swap(ring->pg_vec_order, order);
+       swap(ring->pg_vec, pg_vec);
+
+       __skb_queue_purge(queue);
+       spin_unlock_bh(&queue->lock);
+
+       WARN_ON(atomic_read(&nlk->mapped));
+
+       if (pg_vec)
+               free_pg_vec(pg_vec, order, req->nm_block_nr);
+}
+
 static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
-                           bool closing, bool tx_ring)
+                           bool tx_ring)
 {
        struct netlink_sock *nlk = nlk_sk(sk);
        struct netlink_ring *ring;
-       struct sk_buff_head *queue;
        void **pg_vec = NULL;
        unsigned int order = 0;
-       int err;
 
        ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
-       queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
 
-       if (!closing) {
-               if (atomic_read(&nlk->mapped))
-                       return -EBUSY;
-               if (atomic_read(&ring->pending))
-                       return -EBUSY;
-       }
+       if (atomic_read(&nlk->mapped))
+               return -EBUSY;
+       if (atomic_read(&ring->pending))
+               return -EBUSY;
 
        if (req->nm_block_nr) {
                if (ring->pg_vec != NULL)
@@ -407,31 +434,19 @@ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
                        return -EINVAL;
        }
 
-       err = -EBUSY;
        mutex_lock(&nlk->pg_vec_lock);
-       if (closing || atomic_read(&nlk->mapped) == 0) {
-               err = 0;
-               spin_lock_bh(&queue->lock);
-
-               ring->frame_max         = req->nm_frame_nr - 1;
-               ring->head              = 0;
-               ring->frame_size        = req->nm_frame_size;
-               ring->pg_vec_pages      = req->nm_block_size / PAGE_SIZE;
-
-               swap(ring->pg_vec_len, req->nm_block_nr);
-               swap(ring->pg_vec_order, order);
-               swap(ring->pg_vec, pg_vec);
-
-               __skb_queue_purge(queue);
-               spin_unlock_bh(&queue->lock);
-
-               WARN_ON(atomic_read(&nlk->mapped));
+       if (atomic_read(&nlk->mapped) == 0) {
+               __netlink_set_ring(sk, req, tx_ring, pg_vec, order);
+               mutex_unlock(&nlk->pg_vec_lock);
+               return 0;
        }
+
        mutex_unlock(&nlk->pg_vec_lock);
 
        if (pg_vec)
                free_pg_vec(pg_vec, order, req->nm_block_nr);
-       return err;
+
+       return -EBUSY;
 }
 
 static void netlink_mm_open(struct vm_area_struct *vma)
@@ -900,10 +915,10 @@ static void netlink_sock_destruct(struct sock *sk)
 
                memset(&req, 0, sizeof(req));
                if (nlk->rx_ring.pg_vec)
-                       netlink_set_ring(sk, &req, true, false);
+                       __netlink_set_ring(sk, &req, false, NULL, 0);
                memset(&req, 0, sizeof(req));
                if (nlk->tx_ring.pg_vec)
-                       netlink_set_ring(sk, &req, true, true);
+                       __netlink_set_ring(sk, &req, true, NULL, 0);
        }
 #endif /* CONFIG_NETLINK_MMAP */
 
@@ -2223,7 +2238,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
                        return -EINVAL;
                if (copy_from_user(&req, optval, sizeof(req)))
                        return -EFAULT;
-               err = netlink_set_ring(sk, &req, false,
+               err = netlink_set_ring(sk, &req,
                                       optname == NETLINK_TX_RING);
                break;
        }
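Because __netlink_set_ring() frees whatever vector it swaps out, the destructor path above tears a ring down by installing nothing: a zeroed request plus a NULL pg_vec purges the queue, clears the ring geometry and releases the old pages, with no -EBUSY policy in the way. In sketch form, teardown is just:

	struct nl_mmap_req req;

	memset(&req, 0, sizeof(req));	/* zero blocks and frames */
	__netlink_set_ring(sk, &req, tx_ring, NULL, 0);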
index 91b9478413ef1ee97602666142c65e9a2fad5228..6e1701de04d804ee86d31a9802e5fa5133e5ac15 100644 (file)
@@ -15,6 +15,6 @@ openvswitch-y := \
        vport-internal_dev.o \
        vport-netdev.o
 
+obj-$(CONFIG_OPENVSWITCH_VXLAN)+= vport-vxlan.o
 obj-$(CONFIG_OPENVSWITCH_GENEVE)+= vport-geneve.o
-obj-$(CONFIG_OPENVSWITCH_VXLAN)        += vport-vxlan.o
 obj-$(CONFIG_OPENVSWITCH_GRE)  += vport-gre.o
index 8a8c0b8b4f63a4bd8e5ff776250189558e6fcb1e..cf04c2f8b32a57bfec8e024d2db95dd8c0468b3f 100644 (file)
@@ -611,7 +611,7 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
                            struct sw_flow_key *key, const struct nlattr *attr,
                            const struct nlattr *actions, int actions_len)
 {
-       struct ovs_tunnel_info info;
+       struct ip_tunnel_info info;
        struct dp_upcall_info upcall;
        const struct nlattr *a;
        int rem;
@@ -733,7 +733,15 @@ static int execute_set_action(struct sk_buff *skb,
 {
        /* Only tunnel set execution is supported without a mask. */
        if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
-               OVS_CB(skb)->egress_tun_info = nla_data(a);
+               struct ovs_tunnel_info *tun = nla_data(a);
+
+               skb_dst_drop(skb);
+               dst_hold((struct dst_entry *)tun->tun_dst);
+               skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
+
+               /* FIXME: Remove when all vports have been converted */
+               OVS_CB(skb)->egress_tun_info = &tun->tun_dst->u.tun_info;
+
                return 0;
        }
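
This hunk is the heart of the conversion: tunnel metadata now rides on the skb as a refcounted dst instead of a bare pointer in the OVS control block, so its lifetime follows the packet. A minimal sketch of that handoff (skb_dst_set(), dst_hold() and struct metadata_dst are the kernel's; the wrapper name is illustrative):

#include <net/dst_metadata.h>

static void attach_tun_dst(struct sk_buff *skb, struct metadata_dst *tun_dst)
{
	skb_dst_drop(skb);		 /* drop any previous dst */
	dst_hold(&tun_dst->dst);	 /* one reference owned by this skb */
	skb_dst_set(skb, &tun_dst->dst); /* released when the skb is freed */
}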
 
index ff8c4a4c160986bf206f4751860f9767e71246bf..ffe984f5b95ce36f15e02de152e453233290fbf4 100644 (file)
@@ -176,7 +176,7 @@ static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
 const char *ovs_dp_name(const struct datapath *dp)
 {
        struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
-       return vport->ops->get_name(vport);
+       return ovs_vport_name(vport);
 }
 
 static int get_dpifindex(const struct datapath *dp)
@@ -188,7 +188,7 @@ static int get_dpifindex(const struct datapath *dp)
 
        local = ovs_vport_rcu(dp, OVSP_LOCAL);
        if (local)
-               ifindex = netdev_vport_priv(local)->dev->ifindex;
+               ifindex = local->dev->ifindex;
        else
                ifindex = 0;
 
@@ -1018,7 +1018,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
                }
                ovs_unlock();
 
-               ovs_nla_free_flow_actions(old_acts);
+               ovs_nla_free_flow_actions_rcu(old_acts);
                ovs_flow_free(new_flow, false);
        }
 
@@ -1030,7 +1030,7 @@ err_unlock_ovs:
        ovs_unlock();
        kfree_skb(reply);
 err_kfree_acts:
-       kfree(acts);
+       ovs_nla_free_flow_actions(acts);
 err_kfree_flow:
        ovs_flow_free(new_flow, false);
 error:
@@ -1157,7 +1157,7 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
        if (reply)
                ovs_notify(&dp_flow_genl_family, reply, info);
        if (old_acts)
-               ovs_nla_free_flow_actions(old_acts);
+               ovs_nla_free_flow_actions_rcu(old_acts);
 
        return 0;
 
@@ -1165,7 +1165,7 @@ err_unlock_ovs:
        ovs_unlock();
        kfree_skb(reply);
 err_kfree_acts:
-       kfree(acts);
+       ovs_nla_free_flow_actions(acts);
 error:
        return error;
 }
@@ -1800,7 +1800,7 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
        if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
            nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
            nla_put_string(skb, OVS_VPORT_ATTR_NAME,
-                          vport->ops->get_name(vport)))
+                          ovs_vport_name(vport)))
                goto nla_put_failure;
 
        ovs_vport_get_stats(vport, &vport_stats);
@@ -2219,13 +2219,10 @@ static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
                        struct vport *vport;
 
                        hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
-                               struct netdev_vport *netdev_vport;
-
                                if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
                                        continue;
 
-                               netdev_vport = netdev_vport_priv(vport);
-                               if (dev_net(netdev_vport->dev) == dnet)
+                               if (dev_net(vport->dev) == dnet)
                                        list_add(&vport->detach_list, head);
                        }
                }
index cd691e935e08c76b75e8ed90d7ec79ce9875ee3c..6b28c5cedb23826fe151958552234332ef7ca201 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/u64_stats_sync.h>
+#include <net/ip_tunnels.h>
 
 #include "flow.h"
 #include "flow_table.h"
@@ -98,7 +99,7 @@ struct datapath {
  * when a packet is received by OVS.
  */
 struct ovs_skb_cb {
-       struct ovs_tunnel_info  *egress_tun_info;
+       struct ip_tunnel_info  *egress_tun_info;
        struct vport            *input_vport;
 };
 #define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
@@ -114,7 +115,7 @@ struct ovs_skb_cb {
  * @egress_tun_info: If nonnull, becomes %OVS_PACKET_ATTR_EGRESS_TUN_KEY.
  */
 struct dp_upcall_info {
-       const struct ovs_tunnel_info *egress_tun_info;
+       const struct ip_tunnel_info *egress_tun_info;
        const struct nlattr *userdata;
        const struct nlattr *actions;
        int actions_len;
index 2c631fe76be191c1a7dd1bc33c1901c8507621b4..a7a80a6b77b0ab15ecdd5859ee732417ee4a4705 100644 (file)
@@ -58,13 +58,10 @@ void ovs_dp_notify_wq(struct work_struct *work)
                        struct hlist_node *n;
 
                        hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) {
-                               struct netdev_vport *netdev_vport;
-
                                if (vport->ops->type != OVS_VPORT_TYPE_NETDEV)
                                        continue;
 
-                               netdev_vport = netdev_vport_priv(vport);
-                               if (!(netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH))
+                               if (!(vport->dev->priv_flags & IFF_OVS_DATAPATH))
                                        dp_detach_port_notify(vport);
                        }
                }
index bc7b0aba994adf6f8ec8cfc2b3278b46d19621db..8db22ef73626c8cb9b428d77f3ec24eea8b61d97 100644 (file)
@@ -682,12 +682,12 @@ int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
        return key_extract(skb, key);
 }
 
-int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info,
+int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
                         struct sk_buff *skb, struct sw_flow_key *key)
 {
        /* Extract metadata from packet. */
        if (tun_info) {
-               memcpy(&key->tun_key, &tun_info->tunnel, sizeof(key->tun_key));
+               memcpy(&key->tun_key, &tun_info->key, sizeof(key->tun_key));
 
                if (tun_info->options) {
                        BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) *
index a076e445ccc2e267f2664ddd6e81badc98a2636b..b62cdb3e35892aa56468fbc17b1cd0ef3450af6a 100644 (file)
 #include <linux/time.h>
 #include <linux/flex_array.h>
 #include <net/inet_ecn.h>
+#include <net/ip_tunnels.h>
+#include <net/dst_metadata.h>
 
 struct sk_buff;
 
-/* Used to memset ovs_key_ipv4_tunnel padding. */
-#define OVS_TUNNEL_KEY_SIZE                                    \
-       (offsetof(struct ovs_key_ipv4_tunnel, tp_dst) +         \
-        FIELD_SIZEOF(struct ovs_key_ipv4_tunnel, tp_dst))
-
-struct ovs_key_ipv4_tunnel {
-       __be64 tun_id;
-       __be32 ipv4_src;
-       __be32 ipv4_dst;
-       __be16 tun_flags;
-       u8   ipv4_tos;
-       u8   ipv4_ttl;
-       __be16 tp_src;
-       __be16 tp_dst;
-} __packed __aligned(4); /* Minimize padding. */
-
-struct ovs_tunnel_info {
-       struct ovs_key_ipv4_tunnel tunnel;
-       const void *options;
-       u8 options_len;
-};
-
 /* Store options at the end of the array if they are less than the
  * maximum size. This allows us to get the benefits of variable length
  * matching for small options.
@@ -66,54 +46,9 @@ struct ovs_tunnel_info {
 #define TUN_METADATA_OPTS(flow_key, opt_len) \
        ((void *)((flow_key)->tun_opts + TUN_METADATA_OFFSET(opt_len)))
 
-static inline void __ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
-                                           __be32 saddr, __be32 daddr,
-                                           u8 tos, u8 ttl,
-                                           __be16 tp_src,
-                                           __be16 tp_dst,
-                                           __be64 tun_id,
-                                           __be16 tun_flags,
-                                           const void *opts,
-                                           u8 opts_len)
-{
-       tun_info->tunnel.tun_id = tun_id;
-       tun_info->tunnel.ipv4_src = saddr;
-       tun_info->tunnel.ipv4_dst = daddr;
-       tun_info->tunnel.ipv4_tos = tos;
-       tun_info->tunnel.ipv4_ttl = ttl;
-       tun_info->tunnel.tun_flags = tun_flags;
-
-       /* For the tunnel types on the top of IPsec, the tp_src and tp_dst of
-        * the upper tunnel are used.
-        * E.g: GRE over IPSEC, the tp_src and tp_port are zero.
-        */
-       tun_info->tunnel.tp_src = tp_src;
-       tun_info->tunnel.tp_dst = tp_dst;
-
-       /* Clear struct padding. */
-       if (sizeof(tun_info->tunnel) != OVS_TUNNEL_KEY_SIZE)
-               memset((unsigned char *)&tun_info->tunnel + OVS_TUNNEL_KEY_SIZE,
-                      0, sizeof(tun_info->tunnel) - OVS_TUNNEL_KEY_SIZE);
-
-       tun_info->options = opts;
-       tun_info->options_len = opts_len;
-}
-
-static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
-                                         const struct iphdr *iph,
-                                         __be16 tp_src,
-                                         __be16 tp_dst,
-                                         __be64 tun_id,
-                                         __be16 tun_flags,
-                                         const void *opts,
-                                         u8 opts_len)
-{
-       __ovs_flow_tun_info_init(tun_info, iph->saddr, iph->daddr,
-                                iph->tos, iph->ttl,
-                                tp_src, tp_dst,
-                                tun_id, tun_flags,
-                                opts, opts_len);
-}
+struct ovs_tunnel_info {
+       struct metadata_dst     *tun_dst;
+};
 
 #define OVS_SW_FLOW_KEY_METADATA_SIZE                  \
        (offsetof(struct sw_flow_key, recirc_id) +      \
@@ -122,7 +57,7 @@ static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
 struct sw_flow_key {
        u8 tun_opts[255];
        u8 tun_opts_len;
-       struct ovs_key_ipv4_tunnel tun_key;  /* Encapsulating tunnel key. */
+       struct ip_tunnel_key tun_key;   /* Encapsulating tunnel key. */
        struct {
                u32     priority;       /* Packet QoS priority. */
                u32     skb_mark;       /* SKB mark. */
@@ -273,7 +208,7 @@ void ovs_flow_stats_clear(struct sw_flow *);
 u64 ovs_flow_used_time(unsigned long flow_jiffies);
 
 int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key);
-int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info,
+int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
                         struct sk_buff *skb,
                         struct sw_flow_key *key);
 /* Extract key from packet coming from userspace. */
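
The tail-storage scheme described above the TUN_METADATA_* macros deserves a note: options shorter than the 255-byte buffer are written at its end, so a flow mask can cover just the used span and variable-length options still compare with a single masked memcmp. A sketch of the layout, assuming a helper that mirrors TUN_METADATA_OFFSET():

/* Options occupy the tail of tun_opts; shorter options simply start
 * further in, leaving the unused head out of the masked comparison.
 */
static void *sw_flow_tun_opts(struct sw_flow_key *key)
{
	return key->tun_opts + sizeof(key->tun_opts) - key->tun_opts_len;
}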
index 624e41c4267fe0206fe94ede37e3160d80497abf..a6eb77ab1a6456768338a55290955bb29c69749a 100644 (file)
@@ -47,9 +47,9 @@
 #include <net/ipv6.h>
 #include <net/ndisc.h>
 #include <net/mpls.h>
+#include <net/vxlan.h>
 
 #include "flow_netlink.h"
-#include "vport-vxlan.h"
 
 struct ovs_len_tbl {
        int len;
@@ -475,7 +475,7 @@ static int vxlan_tun_opt_from_nlattr(const struct nlattr *a,
 {
        struct nlattr *tb[OVS_VXLAN_EXT_MAX+1];
        unsigned long opt_key_offset;
-       struct ovs_vxlan_opts opts;
+       struct vxlan_metadata opts;
        int err;
 
        BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
@@ -626,7 +626,7 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
 static int vxlan_opt_to_nlattr(struct sk_buff *skb,
                               const void *tun_opts, int swkey_tun_opts_len)
 {
-       const struct ovs_vxlan_opts *opts = tun_opts;
+       const struct vxlan_metadata *opts = tun_opts;
        struct nlattr *nla;
 
        nla = nla_nest_start(skb, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
@@ -641,7 +641,7 @@ static int vxlan_opt_to_nlattr(struct sk_buff *skb,
 }
 
 static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
-                               const struct ovs_key_ipv4_tunnel *output,
+                               const struct ip_tunnel_key *output,
                                const void *tun_opts, int swkey_tun_opts_len)
 {
        if (output->tun_flags & TUNNEL_KEY &&
@@ -689,7 +689,7 @@ static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
 }
 
 static int ipv4_tun_to_nlattr(struct sk_buff *skb,
-                             const struct ovs_key_ipv4_tunnel *output,
+                             const struct ip_tunnel_key *output,
                              const void *tun_opts, int swkey_tun_opts_len)
 {
        struct nlattr *nla;
@@ -708,9 +708,9 @@ static int ipv4_tun_to_nlattr(struct sk_buff *skb,
 }
 
 int ovs_nla_put_egress_tunnel_key(struct sk_buff *skb,
-                                 const struct ovs_tunnel_info *egress_tun_info)
+                                 const struct ip_tunnel_info *egress_tun_info)
 {
-       return __ipv4_tun_to_nlattr(skb, &egress_tun_info->tunnel,
+       return __ipv4_tun_to_nlattr(skb, &egress_tun_info->key,
                                    egress_tun_info->options,
                                    egress_tun_info->options_len);
 }
@@ -1548,11 +1548,48 @@ static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
        return sfa;
 }
 
+static void ovs_nla_free_set_action(const struct nlattr *a)
+{
+       const struct nlattr *ovs_key = nla_data(a);
+       struct ovs_tunnel_info *ovs_tun;
+
+       switch (nla_type(ovs_key)) {
+       case OVS_KEY_ATTR_TUNNEL_INFO:
+               ovs_tun = nla_data(ovs_key);
+               dst_release((struct dst_entry *)ovs_tun->tun_dst);
+               break;
+       }
+}
+
+void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
+{
+       const struct nlattr *a;
+       int rem;
+
+       if (!sf_acts)
+               return;
+
+       nla_for_each_attr(a, sf_acts->actions, sf_acts->actions_len, rem) {
+               switch (nla_type(a)) {
+               case OVS_ACTION_ATTR_SET:
+                       ovs_nla_free_set_action(a);
+                       break;
+               }
+       }
+
+       kfree(sf_acts);
+}
+
+static void __ovs_nla_free_flow_actions(struct rcu_head *head)
+{
+       ovs_nla_free_flow_actions(container_of(head, struct sw_flow_actions, rcu));
+}
+
 /* Schedules 'sf_acts' to be freed after the next RCU grace period.
  * The caller must hold rcu_read_lock for this to be sensible. */
-void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
+void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *sf_acts)
 {
-       kfree_rcu(sf_acts, rcu);
+       call_rcu(&sf_acts->rcu, __ovs_nla_free_flow_actions);
 }
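
kfree_rcu() can only kfree() the object; since freeing a flow's actions now has to walk them and drop tun_dst references first, the code above moves to call_rcu() with an explicit callback. The generic shape of that pattern, with hypothetical names:

struct foo {
	void *resource;
	struct rcu_head rcu;
};

static void foo_free_rcu(struct rcu_head *head)
{
	struct foo *f = container_of(head, struct foo, rcu);

	put_resource(f->resource);	/* teardown that kfree_rcu() cannot do */
	kfree(f);
}

static void foo_free(struct foo *f)
{
	call_rcu(&f->rcu, foo_free_rcu);	/* runs after a grace period */
}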
 
 static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
@@ -1746,7 +1783,9 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
 {
        struct sw_flow_match match;
        struct sw_flow_key key;
-       struct ovs_tunnel_info *tun_info;
+       struct metadata_dst *tun_dst;
+       struct ip_tunnel_info *tun_info;
+       struct ovs_tunnel_info *ovs_tun;
        struct nlattr *a;
        int err = 0, start, opts_type;
 
@@ -1771,13 +1810,23 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
        if (start < 0)
                return start;
 
+       tun_dst = metadata_dst_alloc(key.tun_opts_len, GFP_KERNEL);
+       if (!tun_dst)
+               return -ENOMEM;
+
        a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL,
-                        sizeof(*tun_info) + key.tun_opts_len, log);
-       if (IS_ERR(a))
+                        sizeof(*ovs_tun), log);
+       if (IS_ERR(a)) {
+               dst_release((struct dst_entry *)tun_dst);
                return PTR_ERR(a);
+       }
+
+       ovs_tun = nla_data(a);
+       ovs_tun->tun_dst = tun_dst;
 
-       tun_info = nla_data(a);
-       tun_info->tunnel = key.tun_key;
+       tun_info = &tun_dst->u.tun_info;
+       tun_info->mode = IP_TUNNEL_INFO_TX;
+       tun_info->key = key.tun_key;
        tun_info->options_len = key.tun_opts_len;
 
        if (tun_info->options_len) {
@@ -2177,7 +2226,7 @@ int ovs_nla_copy_actions(const struct nlattr *attr,
        err = __ovs_nla_copy_actions(attr, key, 0, sfa, key->eth.type,
                                     key->eth.tci, log);
        if (err)
-               kfree(*sfa);
+               ovs_nla_free_flow_actions(*sfa);
 
        return err;
 }
@@ -2227,13 +2276,14 @@ static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
 
        switch (key_type) {
        case OVS_KEY_ATTR_TUNNEL_INFO: {
-               struct ovs_tunnel_info *tun_info = nla_data(ovs_key);
+               struct ovs_tunnel_info *ovs_tun = nla_data(ovs_key);
+               struct ip_tunnel_info *tun_info = &ovs_tun->tun_dst->u.tun_info;
 
                start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
                if (!start)
                        return -EMSGSIZE;
 
-               err = ipv4_tun_to_nlattr(skb, &tun_info->tunnel,
+               err = ipv4_tun_to_nlattr(skb, &tun_info->key,
                                         tun_info->options_len ?
                                                tun_info->options : NULL,
                                         tun_info->options_len);
index 5c3d75bff3104a1ba0ea1b916900514245e9af2f..acd074408f0aa2167410e3dbf6a05052ff3b4a4b 100644 (file)
@@ -55,7 +55,7 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb);
 int ovs_nla_get_match(struct sw_flow_match *, const struct nlattr *key,
                      const struct nlattr *mask, bool log);
 int ovs_nla_put_egress_tunnel_key(struct sk_buff *,
-                                 const struct ovs_tunnel_info *);
+                                 const struct ip_tunnel_info *);
 
 bool ovs_nla_get_ufid(struct sw_flow_id *, const struct nlattr *, bool log);
 int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
@@ -69,5 +69,6 @@ int ovs_nla_put_actions(const struct nlattr *attr,
                        int len, struct sk_buff *skb);
 
 void ovs_nla_free_flow_actions(struct sw_flow_actions *);
+void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *);
 
 #endif /* flow_netlink.h */
index 4613df8c82900e32a4e0188688c9e0354022484d..3a9d1dde76ed3457bbe527053d95bd9ec774f100 100644 (file)
@@ -18,6 +18,7 @@
 
 #include "flow.h"
 #include "datapath.h"
+#include "flow_netlink.h"
 #include <linux/uaccess.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -143,7 +144,8 @@ static void flow_free(struct sw_flow *flow)
 
        if (ovs_identifier_is_key(&flow->id))
                kfree(flow->id.unmasked_key);
-       kfree((struct sw_flow_actions __force *)flow->sf_acts);
+       if (flow->sf_acts)
+               ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
        for_each_node(node)
                if (flow->stats[node])
                        kmem_cache_free(flow_stats_cache,
@@ -752,7 +754,7 @@ int ovs_flow_init(void)
        BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
 
        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
-                                      + (num_possible_nodes()
+                                      + (nr_node_ids
                                          * sizeof(struct flow_stats *)),
                                       0, 0, NULL);
        if (flow_cache == NULL)
index 208c576bd1b683d909f1d9c2e2c09de610057f55..1da3a14d10101f78881c392a06b49aa9e53c9a13 100644 (file)
@@ -77,7 +77,7 @@ static void geneve_rcv(struct geneve_sock *gs, struct sk_buff *skb)
        struct vport *vport = gs->rcv_data;
        struct genevehdr *geneveh = geneve_hdr(skb);
        int opts_len;
-       struct ovs_tunnel_info tun_info;
+       struct ip_tunnel_info tun_info;
        __be64 key;
        __be16 flags;
 
@@ -90,10 +90,9 @@ static void geneve_rcv(struct geneve_sock *gs, struct sk_buff *skb)
 
        key = vni_to_tunnel_id(geneveh->vni);
 
-       ovs_flow_tun_info_init(&tun_info, ip_hdr(skb),
-                              udp_hdr(skb)->source, udp_hdr(skb)->dest,
-                              key, flags,
-                              geneveh->options, opts_len);
+       ip_tunnel_info_init(&tun_info, ip_hdr(skb),
+                           udp_hdr(skb)->source, udp_hdr(skb)->dest,
+                           key, flags, geneveh->options, opts_len);
 
        ovs_vport_receive(vport, skb, &tun_info);
 }
@@ -165,8 +164,8 @@ error:
 
 static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
 {
-       const struct ovs_key_ipv4_tunnel *tun_key;
-       struct ovs_tunnel_info *tun_info;
+       const struct ip_tunnel_key *tun_key;
+       struct ip_tunnel_info *tun_info;
        struct net *net = ovs_dp_get_net(vport->dp);
        struct geneve_port *geneve_port = geneve_vport(vport);
        __be16 dport = inet_sk(geneve_port->gs->sock->sk)->inet_sport;
@@ -183,7 +182,7 @@ static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
                goto error;
        }
 
-       tun_key = &tun_info->tunnel;
+       tun_key = &tun_info->key;
        rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_UDP);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
@@ -225,7 +224,7 @@ static const char *geneve_get_name(const struct vport *vport)
 }
 
 static int geneve_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-                                     struct ovs_tunnel_info *egress_tun_info)
+                                     struct ip_tunnel_info *egress_tun_info)
 {
        struct geneve_port *geneve_port = geneve_vport(vport);
        struct net *net = ovs_dp_get_net(vport->dp);
index f17ac9642f4ee3cca4ce9bece9bafa32de786621..b87656c66aaffe3b6ba905cbb84cdc0ec0b0e31b 100644 (file)
@@ -67,9 +67,9 @@ static struct sk_buff *__build_header(struct sk_buff *skb,
                                      int tunnel_hlen)
 {
        struct tnl_ptk_info tpi;
-       const struct ovs_key_ipv4_tunnel *tun_key;
+       const struct ip_tunnel_key *tun_key;
 
-       tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
+       tun_key = &OVS_CB(skb)->egress_tun_info->key;
 
        skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
        if (IS_ERR(skb))
@@ -97,7 +97,7 @@ static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
 static int gre_rcv(struct sk_buff *skb,
                   const struct tnl_ptk_info *tpi)
 {
-       struct ovs_tunnel_info tun_info;
+       struct ip_tunnel_info tun_info;
        struct ovs_net *ovs_net;
        struct vport *vport;
        __be64 key;
@@ -108,8 +108,8 @@ static int gre_rcv(struct sk_buff *skb,
                return PACKET_REJECT;
 
        key = key_to_tunnel_id(tpi->key, tpi->seq);
-       ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), 0, 0, key,
-                              filter_tnl_flags(tpi->flags), NULL, 0);
+       ip_tunnel_info_init(&tun_info, ip_hdr(skb), 0, 0, key,
+                           filter_tnl_flags(tpi->flags), NULL, 0);
 
        ovs_vport_receive(vport, skb, &tun_info);
        return PACKET_RCVD;
@@ -134,7 +134,7 @@ static int gre_err(struct sk_buff *skb, u32 info,
 static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 {
        struct net *net = ovs_dp_get_net(vport->dp);
-       const struct ovs_key_ipv4_tunnel *tun_key;
+       const struct ip_tunnel_key *tun_key;
        struct flowi4 fl;
        struct rtable *rt;
        int min_headroom;
@@ -147,7 +147,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
                goto err_free_skb;
        }
 
-       tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
+       tun_key = &OVS_CB(skb)->egress_tun_info->key;
        rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_GRE);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
@@ -277,7 +277,7 @@ static void gre_tnl_destroy(struct vport *vport)
 }
 
 static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-                                  struct ovs_tunnel_info *egress_tun_info)
+                                  struct ip_tunnel_info *egress_tun_info)
 {
        return ovs_tunnel_get_egress_info(egress_tun_info,
                                          ovs_dp_get_net(vport->dp),
index 6a55f71055051957685b11e8950ba1e4197dce20..c058bbf876c343691d1874e632926e80ac4b0130 100644 (file)
@@ -156,49 +156,44 @@ static void do_setup(struct net_device *netdev)
 static struct vport *internal_dev_create(const struct vport_parms *parms)
 {
        struct vport *vport;
-       struct netdev_vport *netdev_vport;
        struct internal_dev *internal_dev;
        int err;
 
-       vport = ovs_vport_alloc(sizeof(struct netdev_vport),
-                               &ovs_internal_vport_ops, parms);
+       vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
        if (IS_ERR(vport)) {
                err = PTR_ERR(vport);
                goto error;
        }
 
-       netdev_vport = netdev_vport_priv(vport);
-
-       netdev_vport->dev = alloc_netdev(sizeof(struct internal_dev),
-                                        parms->name, NET_NAME_UNKNOWN,
-                                        do_setup);
-       if (!netdev_vport->dev) {
+       vport->dev = alloc_netdev(sizeof(struct internal_dev),
+                                 parms->name, NET_NAME_UNKNOWN, do_setup);
+       if (!vport->dev) {
                err = -ENOMEM;
                goto error_free_vport;
        }
 
-       dev_net_set(netdev_vport->dev, ovs_dp_get_net(vport->dp));
-       internal_dev = internal_dev_priv(netdev_vport->dev);
+       dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
+       internal_dev = internal_dev_priv(vport->dev);
        internal_dev->vport = vport;
 
        /* Restrict bridge port to current netns. */
        if (vport->port_no == OVSP_LOCAL)
-               netdev_vport->dev->features |= NETIF_F_NETNS_LOCAL;
+               vport->dev->features |= NETIF_F_NETNS_LOCAL;
 
        rtnl_lock();
-       err = register_netdevice(netdev_vport->dev);
+       err = register_netdevice(vport->dev);
        if (err)
                goto error_free_netdev;
 
-       dev_set_promiscuity(netdev_vport->dev, 1);
+       dev_set_promiscuity(vport->dev, 1);
        rtnl_unlock();
-       netif_start_queue(netdev_vport->dev);
+       netif_start_queue(vport->dev);
 
        return vport;
 
 error_free_netdev:
        rtnl_unlock();
-       free_netdev(netdev_vport->dev);
+       free_netdev(vport->dev);
 error_free_vport:
        ovs_vport_free(vport);
 error:
@@ -207,21 +202,19 @@ error:
 
 static void internal_dev_destroy(struct vport *vport)
 {
-       struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-
-       netif_stop_queue(netdev_vport->dev);
+       netif_stop_queue(vport->dev);
        rtnl_lock();
-       dev_set_promiscuity(netdev_vport->dev, -1);
+       dev_set_promiscuity(vport->dev, -1);
 
        /* unregister_netdevice() waits for an RCU grace period. */
-       unregister_netdevice(netdev_vport->dev);
+       unregister_netdevice(vport->dev);
 
        rtnl_unlock();
 }
 
 static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
 {
-       struct net_device *netdev = netdev_vport_priv(vport)->dev;
+       struct net_device *netdev = vport->dev;
        int len;
 
        if (unlikely(!(netdev->flags & IFF_UP))) {
@@ -249,7 +242,6 @@ static struct vport_ops ovs_internal_vport_ops = {
        .type           = OVS_VPORT_TYPE_INTERNAL,
        .create         = internal_dev_create,
        .destroy        = internal_dev_destroy,
-       .get_name       = ovs_netdev_get_name,
        .send           = internal_dev_recv,
 };
 
index 33e6d6e2908f553516c5ca97c4b93abee7b7057b..cddb7069b11b7852093baa0efc67eddb73bf3a2b 100644 (file)
 #include <linux/rtnetlink.h>
 #include <linux/skbuff.h>
 #include <linux/openvswitch.h>
+#include <linux/export.h>
 
-#include <net/llc.h>
+#include <net/ip_tunnels.h>
+#include <net/rtnetlink.h>
 
 #include "datapath.h"
+#include "vport.h"
 #include "vport-internal_dev.h"
 #include "vport-netdev.h"
 
@@ -54,7 +57,7 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
        skb_push(skb, ETH_HLEN);
        ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
 
-       ovs_vport_receive(vport, skb, NULL);
+       ovs_vport_receive(vport, skb, skb_tunnel_info(skb, AF_INET));
        return;
 
 error:
@@ -83,104 +86,96 @@ static struct net_device *get_dpdev(const struct datapath *dp)
 
        local = ovs_vport_ovsl(dp, OVSP_LOCAL);
        BUG_ON(!local);
-       return netdev_vport_priv(local)->dev;
+       return local->dev;
 }
 
-static struct vport *netdev_create(const struct vport_parms *parms)
+struct vport *ovs_netdev_link(struct vport *vport, const char *name)
 {
-       struct vport *vport;
-       struct netdev_vport *netdev_vport;
        int err;
 
-       vport = ovs_vport_alloc(sizeof(struct netdev_vport),
-                               &ovs_netdev_vport_ops, parms);
-       if (IS_ERR(vport)) {
-               err = PTR_ERR(vport);
-               goto error;
-       }
-
-       netdev_vport = netdev_vport_priv(vport);
-
-       netdev_vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), parms->name);
-       if (!netdev_vport->dev) {
+       vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), name);
+       if (!vport->dev) {
                err = -ENODEV;
                goto error_free_vport;
        }
 
-       if (netdev_vport->dev->flags & IFF_LOOPBACK ||
-           netdev_vport->dev->type != ARPHRD_ETHER ||
-           ovs_is_internal_dev(netdev_vport->dev)) {
+       if (vport->dev->flags & IFF_LOOPBACK ||
+           vport->dev->type != ARPHRD_ETHER ||
+           ovs_is_internal_dev(vport->dev)) {
                err = -EINVAL;
                goto error_put;
        }
 
        rtnl_lock();
-       err = netdev_master_upper_dev_link(netdev_vport->dev,
+       err = netdev_master_upper_dev_link(vport->dev,
                                           get_dpdev(vport->dp));
        if (err)
                goto error_unlock;
 
-       err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook,
+       err = netdev_rx_handler_register(vport->dev, netdev_frame_hook,
                                         vport);
        if (err)
                goto error_master_upper_dev_unlink;
 
-       dev_disable_lro(netdev_vport->dev);
-       dev_set_promiscuity(netdev_vport->dev, 1);
-       netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
+       dev_disable_lro(vport->dev);
+       dev_set_promiscuity(vport->dev, 1);
+       vport->dev->priv_flags |= IFF_OVS_DATAPATH;
        rtnl_unlock();
 
        return vport;
 
 error_master_upper_dev_unlink:
-       netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp));
+       netdev_upper_dev_unlink(vport->dev, get_dpdev(vport->dp));
 error_unlock:
        rtnl_unlock();
 error_put:
-       dev_put(netdev_vport->dev);
+       dev_put(vport->dev);
 error_free_vport:
        ovs_vport_free(vport);
-error:
        return ERR_PTR(err);
 }
+EXPORT_SYMBOL_GPL(ovs_netdev_link);
+
+static struct vport *netdev_create(const struct vport_parms *parms)
+{
+       struct vport *vport;
+
+       vport = ovs_vport_alloc(0, &ovs_netdev_vport_ops, parms);
+       if (IS_ERR(vport))
+               return vport;
+
+       return ovs_netdev_link(vport, parms->name);
+}
 
-static void free_port_rcu(struct rcu_head *rcu)
+void ovs_vport_free_rcu(struct rcu_head *rcu)
 {
-       struct netdev_vport *netdev_vport = container_of(rcu,
-                                       struct netdev_vport, rcu);
+       struct vport *vport = container_of(rcu, struct vport, rcu);
 
-       dev_put(netdev_vport->dev);
-       ovs_vport_free(vport_from_priv(netdev_vport));
+       if (vport->dev)
+               dev_put(vport->dev);
+       ovs_vport_free(vport);
 }
+EXPORT_SYMBOL_GPL(ovs_vport_free_rcu);
 
 void ovs_netdev_detach_dev(struct vport *vport)
 {
-       struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-
        ASSERT_RTNL();
-       netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
-       netdev_rx_handler_unregister(netdev_vport->dev);
-       netdev_upper_dev_unlink(netdev_vport->dev,
-                               netdev_master_upper_dev_get(netdev_vport->dev));
-       dev_set_promiscuity(netdev_vport->dev, -1);
+       vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
+       netdev_rx_handler_unregister(vport->dev);
+       netdev_upper_dev_unlink(vport->dev,
+                               netdev_master_upper_dev_get(vport->dev));
+       dev_set_promiscuity(vport->dev, -1);
 }
+EXPORT_SYMBOL_GPL(ovs_netdev_detach_dev);
 
 static void netdev_destroy(struct vport *vport)
 {
-       struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-
        rtnl_lock();
-       if (netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH)
+       if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
                ovs_netdev_detach_dev(vport);
        rtnl_unlock();
 
-       call_rcu(&netdev_vport->rcu, free_port_rcu);
-}
-
-const char *ovs_netdev_get_name(const struct vport *vport)
-{
-       const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-       return netdev_vport->dev->name;
+       call_rcu(&vport->rcu, ovs_vport_free_rcu);
 }
 
 static unsigned int packet_length(const struct sk_buff *skb)
@@ -193,20 +188,19 @@ static unsigned int packet_length(const struct sk_buff *skb)
        return length;
 }
 
-static int netdev_send(struct vport *vport, struct sk_buff *skb)
+int ovs_netdev_send(struct vport *vport, struct sk_buff *skb)
 {
-       struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-       int mtu = netdev_vport->dev->mtu;
+       int mtu = vport->dev->mtu;
        int len;
 
        if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
                net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
-                                    netdev_vport->dev->name,
+                                    vport->dev->name,
                                     packet_length(skb), mtu);
                goto drop;
        }
 
-       skb->dev = netdev_vport->dev;
+       skb->dev = vport->dev;
        len = skb->len;
        dev_queue_xmit(skb);
 
@@ -216,6 +210,7 @@ drop:
        kfree_skb(skb);
        return 0;
 }
+EXPORT_SYMBOL_GPL(ovs_netdev_send);
 
 /* Returns null if this device is not attached to a datapath. */
 struct vport *ovs_netdev_get_vport(struct net_device *dev)
@@ -231,8 +226,7 @@ static struct vport_ops ovs_netdev_vport_ops = {
        .type           = OVS_VPORT_TYPE_NETDEV,
        .create         = netdev_create,
        .destroy        = netdev_destroy,
-       .get_name       = ovs_netdev_get_name,
-       .send           = netdev_send,
+       .send           = ovs_netdev_send,
 };
 
 int __init ovs_netdev_init(void)
index 6f7038e79c524cc66dc53188992b0ed9ff6c23ed..804412697a90c46f43214d2e80ac447c23aea6ae 100644 (file)
 
 struct vport *ovs_netdev_get_vport(struct net_device *dev);
 
-struct netdev_vport {
-       struct rcu_head rcu;
-
-       struct net_device *dev;
-};
-
-static inline struct netdev_vport *
-netdev_vport_priv(const struct vport *vport)
-{
-       return vport_priv(vport);
-}
-
-const char *ovs_netdev_get_name(const struct vport *);
+struct vport *ovs_netdev_link(struct vport *vport, const char *name);
+int ovs_netdev_send(struct vport *vport, struct sk_buff *skb);
 void ovs_netdev_detach_dev(struct vport *);
+void ovs_vport_free_rcu(struct rcu_head *);
 
 int __init ovs_netdev_init(void);
 void ovs_netdev_exit(void);
index 6d39766e7828c4351a004fa8ce7d839348c3681d..547173336cd308567c6815170a7ff542710b5705 100644 (file)
  * 02110-1301, USA
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/net.h>
-#include <linux/rculist.h>
-#include <linux/udp.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/openvswitch.h>
 #include <linux/module.h>
-
-#include <net/icmp.h>
-#include <net/ip.h>
 #include <net/udp.h>
 #include <net/ip_tunnels.h>
 #include <net/rtnetlink.h>
-#include <net/route.h>
-#include <net/dsfield.h>
-#include <net/inet_ecn.h>
-#include <net/net_namespace.h>
-#include <net/netns/generic.h>
 #include <net/vxlan.h>
 
 #include "datapath.h"
 #include "vport.h"
-#include "vport-vxlan.h"
-
-/**
- * struct vxlan_port - Keeps track of open UDP ports
- * @vs: vxlan_sock created for the port.
- * @name: vport name.
- */
-struct vxlan_port {
-       struct vxlan_sock *vs;
-       char name[IFNAMSIZ];
-       u32 exts; /* VXLAN_F_* in <net/vxlan.h> */
-};
+#include "vport-netdev.h"
 
-static struct vport_ops ovs_vxlan_vport_ops;
-
-static inline struct vxlan_port *vxlan_vport(const struct vport *vport)
-{
-       return vport_priv(vport);
-}
-
-/* Called with rcu_read_lock and BH disabled. */
-static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
-                     struct vxlan_metadata *md)
-{
-       struct ovs_tunnel_info tun_info;
-       struct vxlan_port *vxlan_port;
-       struct vport *vport = vs->data;
-       struct iphdr *iph;
-       struct ovs_vxlan_opts opts = {
-               .gbp = md->gbp,
-       };
-       __be64 key;
-       __be16 flags;
-
-       flags = TUNNEL_KEY | (udp_hdr(skb)->check != 0 ? TUNNEL_CSUM : 0);
-       vxlan_port = vxlan_vport(vport);
-       if (vxlan_port->exts & VXLAN_F_GBP && md->gbp)
-               flags |= TUNNEL_VXLAN_OPT;
-
-       /* Save outer tunnel values */
-       iph = ip_hdr(skb);
-       key = cpu_to_be64(ntohl(md->vni) >> 8);
-       ovs_flow_tun_info_init(&tun_info, iph,
-                              udp_hdr(skb)->source, udp_hdr(skb)->dest,
-                              key, flags, &opts, sizeof(opts));
-
-       ovs_vport_receive(vport, skb, &tun_info);
-}
+static struct vport_ops ovs_vxlan_netdev_vport_ops;
 
 static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
 {
-       struct vxlan_port *vxlan_port = vxlan_vport(vport);
-       __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+       struct vxlan_dev *vxlan = netdev_priv(vport->dev);
+       __be16 dst_port = vxlan->cfg.dst_port;
 
        if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port)))
                return -EMSGSIZE;
 
-       if (vxlan_port->exts) {
+       if (vxlan->flags & VXLAN_F_GBP) {
                struct nlattr *exts;
 
                exts = nla_nest_start(skb, OVS_TUNNEL_ATTR_EXTENSION);
                if (!exts)
                        return -EMSGSIZE;
 
-               if (vxlan_port->exts & VXLAN_F_GBP &&
+               if (vxlan->flags & VXLAN_F_GBP &&
                    nla_put_flag(skb, OVS_VXLAN_EXT_GBP))
                        return -EMSGSIZE;
 
@@ -114,23 +57,14 @@ static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
        return 0;
 }
 
-static void vxlan_tnl_destroy(struct vport *vport)
-{
-       struct vxlan_port *vxlan_port = vxlan_vport(vport);
-
-       vxlan_sock_release(vxlan_port->vs);
-
-       ovs_vport_deferred_free(vport);
-}
-
-static const struct nla_policy exts_policy[OVS_VXLAN_EXT_MAX+1] = {
+static const struct nla_policy exts_policy[OVS_VXLAN_EXT_MAX + 1] = {
        [OVS_VXLAN_EXT_GBP]     = { .type = NLA_FLAG, },
 };
 
-static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr)
+static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr,
+                               struct vxlan_config *conf)
 {
-       struct nlattr *exts[OVS_VXLAN_EXT_MAX+1];
-       struct vxlan_port *vxlan_port;
+       struct nlattr *exts[OVS_VXLAN_EXT_MAX + 1];
        int err;
 
        if (nla_len(attr) < sizeof(struct nlattr))
@@ -140,10 +74,8 @@ static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr)
        if (err < 0)
                return err;
 
-       vxlan_port = vxlan_vport(vport);
-
        if (exts[OVS_VXLAN_EXT_GBP])
-               vxlan_port->exts |= VXLAN_F_GBP;
+               conf->flags |= VXLAN_F_GBP;
 
        return 0;
 }
@@ -152,128 +84,89 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
 {
        struct net *net = ovs_dp_get_net(parms->dp);
        struct nlattr *options = parms->options;
-       struct vxlan_port *vxlan_port;
-       struct vxlan_sock *vs;
+       struct net_device *dev;
        struct vport *vport;
        struct nlattr *a;
-       u16 dst_port;
        int err;
+       struct vxlan_config conf = {
+               .no_share = true,
+               .flags = VXLAN_F_FLOW_BASED | VXLAN_F_COLLECT_METADATA,
+       };
 
        if (!options) {
                err = -EINVAL;
                goto error;
        }
+
        a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
        if (a && nla_len(a) == sizeof(u16)) {
-               dst_port = nla_get_u16(a);
+               conf.dst_port = htons(nla_get_u16(a));
        } else {
                /* Require destination port from userspace. */
                err = -EINVAL;
                goto error;
        }
 
-       vport = ovs_vport_alloc(sizeof(struct vxlan_port),
-                               &ovs_vxlan_vport_ops, parms);
+       vport = ovs_vport_alloc(0, &ovs_vxlan_netdev_vport_ops, parms);
        if (IS_ERR(vport))
                return vport;
 
-       vxlan_port = vxlan_vport(vport);
-       strncpy(vxlan_port->name, parms->name, IFNAMSIZ);
-
        a = nla_find_nested(options, OVS_TUNNEL_ATTR_EXTENSION);
        if (a) {
-               err = vxlan_configure_exts(vport, a);
+               err = vxlan_configure_exts(vport, a, &conf);
                if (err) {
                        ovs_vport_free(vport);
                        goto error;
                }
        }
 
-       vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true,
-                           vxlan_port->exts);
-       if (IS_ERR(vs)) {
+       rtnl_lock();
+       dev = vxlan_dev_create(net, parms->name, NET_NAME_USER, &conf);
+       if (IS_ERR(dev)) {
+               rtnl_unlock();
                ovs_vport_free(vport);
-               return (void *)vs;
+               return ERR_CAST(dev);
        }
-       vxlan_port->vs = vs;
 
+       dev_change_flags(dev, dev->flags | IFF_UP);
+       rtnl_unlock();
        return vport;
-
 error:
        return ERR_PTR(err);
 }
 
-static int vxlan_ext_gbp(struct sk_buff *skb)
+static struct vport *vxlan_create(const struct vport_parms *parms)
 {
-       const struct ovs_tunnel_info *tun_info;
-       const struct ovs_vxlan_opts *opts;
+       struct vport *vport;
 
-       tun_info = OVS_CB(skb)->egress_tun_info;
-       opts = tun_info->options;
+       vport = vxlan_tnl_create(parms);
+       if (IS_ERR(vport))
+               return vport;
 
-       if (tun_info->tunnel.tun_flags & TUNNEL_VXLAN_OPT &&
-           tun_info->options_len >= sizeof(*opts))
-               return opts->gbp;
-       else
-               return 0;
+       return ovs_netdev_link(vport, parms->name);
 }
 
-static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
+static void vxlan_destroy(struct vport *vport)
 {
-       struct net *net = ovs_dp_get_net(vport->dp);
-       struct vxlan_port *vxlan_port = vxlan_vport(vport);
-       struct sock *sk = vxlan_port->vs->sock->sk;
-       __be16 dst_port = inet_sk(sk)->inet_sport;
-       const struct ovs_key_ipv4_tunnel *tun_key;
-       struct vxlan_metadata md = {0};
-       struct rtable *rt;
-       struct flowi4 fl;
-       __be16 src_port;
-       __be16 df;
-       int err;
-       u32 vxflags;
+       rtnl_lock();
+       if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
+               ovs_netdev_detach_dev(vport);
 
-       if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
-               err = -EINVAL;
-               goto error;
-       }
+       /* Early release so we can unregister the device */
+       dev_put(vport->dev);
+       rtnl_delete_link(vport->dev);
+       vport->dev = NULL;
+       rtnl_unlock();
 
-       tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
-       rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_UDP);
-       if (IS_ERR(rt)) {
-               err = PTR_ERR(rt);
-               goto error;
-       }
-
-       df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
-               htons(IP_DF) : 0;
-
-       skb->ignore_df = 1;
-
-       src_port = udp_flow_src_port(net, skb, 0, 0, true);
-       md.vni = htonl(be64_to_cpu(tun_key->tun_id) << 8);
-       md.gbp = vxlan_ext_gbp(skb);
-       vxflags = vxlan_port->exts |
-                     (tun_key->tun_flags & TUNNEL_CSUM ? VXLAN_F_UDP_CSUM : 0);
-
-       err = vxlan_xmit_skb(rt, sk, skb, fl.saddr, tun_key->ipv4_dst,
-                            tun_key->ipv4_tos, tun_key->ipv4_ttl, df,
-                            src_port, dst_port,
-                            &md, false, vxflags);
-       if (err < 0)
-               ip_rt_put(rt);
-       return err;
-error:
-       kfree_skb(skb);
-       return err;
+       call_rcu(&vport->rcu, ovs_vport_free_rcu);
 }
 
 static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-                                    struct ovs_tunnel_info *egress_tun_info)
+                                    struct ip_tunnel_info *egress_tun_info)
 {
+       struct vxlan_dev *vxlan = netdev_priv(vport->dev);
        struct net *net = ovs_dp_get_net(vport->dp);
-       struct vxlan_port *vxlan_port = vxlan_vport(vport);
-       __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+       __be16 dst_port = vxlan_dev_dst_port(vxlan);
        __be16 src_port;
        int port_min;
        int port_max;
@@ -287,31 +180,23 @@ static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
                                          src_port, dst_port);
 }
 
-static const char *vxlan_get_name(const struct vport *vport)
-{
-       struct vxlan_port *vxlan_port = vxlan_vport(vport);
-       return vxlan_port->name;
-}
-
-static struct vport_ops ovs_vxlan_vport_ops = {
-       .type           = OVS_VPORT_TYPE_VXLAN,
-       .create         = vxlan_tnl_create,
-       .destroy        = vxlan_tnl_destroy,
-       .get_name       = vxlan_get_name,
-       .get_options    = vxlan_get_options,
-       .send           = vxlan_tnl_send,
+static struct vport_ops ovs_vxlan_netdev_vport_ops = {
+       .type                   = OVS_VPORT_TYPE_VXLAN,
+       .create                 = vxlan_create,
+       .destroy                = vxlan_destroy,
+       .get_options            = vxlan_get_options,
+       .send                   = ovs_netdev_send,
        .get_egress_tun_info    = vxlan_get_egress_tun_info,
-       .owner          = THIS_MODULE,
 };
 
 static int __init ovs_vxlan_tnl_init(void)
 {
-       return ovs_vport_ops_register(&ovs_vxlan_vport_ops);
+       return ovs_vport_ops_register(&ovs_vxlan_netdev_vport_ops);
 }
 
 static void __exit ovs_vxlan_tnl_exit(void)
 {
-       ovs_vport_ops_unregister(&ovs_vxlan_vport_ops);
+       ovs_vport_ops_unregister(&ovs_vxlan_netdev_vport_ops);
 }
 
 module_init(ovs_vxlan_tnl_init);
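
The net effect of this file's rewrite: the vport stops open-coding VXLAN encapsulation and instead owns a metadata-mode (flow-based) VXLAN net_device, with transmit going through the shared ovs_netdev_send() path and per-packet keys supplied by the attached tun_info. A condensed sketch of the creation step (vxlan_dev_create() and struct vxlan_config appear in the hunks above; the wrapper is illustrative):

static struct net_device *create_metadata_vxlan(struct net *net,
						const char *name, __be16 port)
{
	struct vxlan_config conf = {
		.no_share = true,
		.flags	  = VXLAN_F_FLOW_BASED | VXLAN_F_COLLECT_METADATA,
		.dst_port = port,
	};
	struct net_device *dev;

	rtnl_lock();
	dev = vxlan_dev_create(net, name, NET_NAME_USER, &conf);
	if (!IS_ERR(dev))
		dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();
	return dev;
}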
diff --git a/net/openvswitch/vport-vxlan.h b/net/openvswitch/vport-vxlan.h
deleted file mode 100644 (file)
index 4b08233..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef VPORT_VXLAN_H
-#define VPORT_VXLAN_H 1
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-struct ovs_vxlan_opts {
-       __u32 gbp;
-};
-
-#endif
index 067a3fff1d2cb0c629c1dc2d75d0353b9269ba71..d14f59403c5eb61cdde91cbe557617194756ccaf 100644 (file)
@@ -113,7 +113,7 @@ struct vport *ovs_vport_locate(const struct net *net, const char *name)
        struct vport *vport;
 
        hlist_for_each_entry_rcu(vport, bucket, hash_node)
-               if (!strcmp(name, vport->ops->get_name(vport)) &&
+               if (!strcmp(name, ovs_vport_name(vport)) &&
                    net_eq(ovs_dp_get_net(vport->dp), net))
                        return vport;
 
@@ -226,7 +226,7 @@ struct vport *ovs_vport_add(const struct vport_parms *parms)
                }
 
                bucket = hash_bucket(ovs_dp_get_net(vport->dp),
-                                    vport->ops->get_name(vport));
+                                    ovs_vport_name(vport));
                hlist_add_head_rcu(&vport->hash_node, bucket);
                return vport;
        }
@@ -469,7 +469,7 @@ u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb)
  * skb->data should point to the Ethernet header.
  */
 void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
-                      const struct ovs_tunnel_info *tun_info)
+                      const struct ip_tunnel_info *tun_info)
 {
        struct pcpu_sw_netstats *stats;
        struct sw_flow_key key;
@@ -572,22 +572,22 @@ void ovs_vport_deferred_free(struct vport *vport)
 }
 EXPORT_SYMBOL_GPL(ovs_vport_deferred_free);
 
-int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info,
+int ovs_tunnel_get_egress_info(struct ip_tunnel_info *egress_tun_info,
                               struct net *net,
-                              const struct ovs_tunnel_info *tun_info,
+                              const struct ip_tunnel_info *tun_info,
                               u8 ipproto,
                               u32 skb_mark,
                               __be16 tp_src,
                               __be16 tp_dst)
 {
-       const struct ovs_key_ipv4_tunnel *tun_key;
+       const struct ip_tunnel_key *tun_key;
        struct rtable *rt;
        struct flowi4 fl;
 
        if (unlikely(!tun_info))
                return -EINVAL;
 
-       tun_key = &tun_info->tunnel;
+       tun_key = &tun_info->key;
 
        /* Route lookup to get source IP address.
         * The process may need to be changed if the corresponding process
@@ -602,22 +602,22 @@ int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info,
        /* Generate egress_tun_info based on tun_info,
         * saddr, tp_src and tp_dst
         */
-       __ovs_flow_tun_info_init(egress_tun_info,
-                                fl.saddr, tun_key->ipv4_dst,
-                                tun_key->ipv4_tos,
-                                tun_key->ipv4_ttl,
-                                tp_src, tp_dst,
-                                tun_key->tun_id,
-                                tun_key->tun_flags,
-                                tun_info->options,
-                                tun_info->options_len);
+       __ip_tunnel_info_init(egress_tun_info,
+                             fl.saddr, tun_key->ipv4_dst,
+                             tun_key->ipv4_tos,
+                             tun_key->ipv4_ttl,
+                             tp_src, tp_dst,
+                             tun_key->tun_id,
+                             tun_key->tun_flags,
+                             tun_info->options,
+                             tun_info->options_len);
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(ovs_tunnel_get_egress_info);
 
 int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-                                 struct ovs_tunnel_info *info)
+                                 struct ip_tunnel_info *info)
 {
        /* get_egress_tun_info() is only implemented on tunnel ports. */
        if (unlikely(!vport->ops->get_egress_tun_info))
index bc85331a6c60cae9182bd1348d35d81117cf2943..1a689c28b5a6356dc1ffe061c80fc537822a4e7b 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/u64_stats_sync.h>
+#include <net/route.h>
 
 #include "datapath.h"
 
@@ -58,15 +59,15 @@ u32 ovs_vport_find_upcall_portid(const struct vport *, struct sk_buff *);
 
 int ovs_vport_send(struct vport *, struct sk_buff *);
 
-int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info,
+int ovs_tunnel_get_egress_info(struct ip_tunnel_info *egress_tun_info,
                               struct net *net,
-                              const struct ovs_tunnel_info *tun_info,
+                              const struct ip_tunnel_info *tun_info,
                               u8 ipproto,
                               u32 skb_mark,
                               __be16 tp_src,
                               __be16 tp_dst);
 int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-                                 struct ovs_tunnel_info *info);
+                                 struct ip_tunnel_info *info);
 
 /* The following definitions are for implementers of vport devices: */
 
@@ -106,7 +107,7 @@ struct vport_portids {
  * @detach_list: list used for detaching vport in net-exit call.
  */
 struct vport {
-       struct rcu_head rcu;
+       struct net_device *dev;
        struct datapath *dp;
        struct vport_portids __rcu *upcall_portids;
        u16 port_no;
@@ -119,6 +120,7 @@ struct vport {
 
        struct vport_err_stats err_stats;
        struct list_head detach_list;
+       struct rcu_head rcu;
 };
 
 /**
@@ -176,7 +178,7 @@ struct vport_ops {
 
        int (*send)(struct vport *, struct sk_buff *);
        int (*get_egress_tun_info)(struct vport *, struct sk_buff *,
-                                  struct ovs_tunnel_info *);
+                                  struct ip_tunnel_info *);
 
        struct module *owner;
        struct list_head list;
@@ -226,7 +228,7 @@ static inline struct vport *vport_from_priv(void *priv)
 }
 
 void ovs_vport_receive(struct vport *, struct sk_buff *,
-                      const struct ovs_tunnel_info *);
+                      const struct ip_tunnel_info *);
 
 static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
                                      const void *start, unsigned int len)
@@ -235,11 +237,16 @@ static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
                skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
 }
 
+static inline const char *ovs_vport_name(struct vport *vport)
+{
+       return vport->dev ? vport->dev->name : vport->ops->get_name(vport);
+}
+
 int ovs_vport_ops_register(struct vport_ops *ops);
 void ovs_vport_ops_unregister(struct vport_ops *ops);
 
 static inline struct rtable *ovs_tunnel_route_lookup(struct net *net,
-                                                    const struct ovs_key_ipv4_tunnel *key,
+                                                    const struct ip_tunnel_key *key,
                                                     u32 mark,
                                                     struct flowi4 *fl,
                                                     u8 protocol)
index c9e8741226c6d8785e89532aa09e029c3fd9e487..b5afe538bb88e9b97eec2faa4e93acbb16be4fa0 100644 (file)
@@ -518,13 +518,11 @@ static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 }
 
 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
-               int tx_ring,
                struct sk_buff_head *rb_queue)
 {
        struct tpacket_kbdq_core *pkc;
 
-       pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
-                       GET_PBDQC_FROM_RB(&po->rx_ring);
+       pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 
        spin_lock_bh(&rb_queue->lock);
        pkc->delete_blk_timer = 1;
@@ -2403,7 +2401,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
                }
                tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
                                          addr, hlen);
-               if (tp_len > dev->mtu + dev->hard_header_len) {
+               if (likely(tp_len >= 0) &&
+                   tp_len > dev->mtu + dev->hard_header_len) {
                        struct ethhdr *ehdr;
                        /* Earlier code assumed this would be a VLAN pkt,
                         * double-check this now that we have the actual
@@ -2784,7 +2783,7 @@ static int packet_release(struct socket *sock)
 static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
 {
        struct packet_sock *po = pkt_sk(sk);
-       const struct net_device *dev_curr;
+       struct net_device *dev_curr;
        __be16 proto_curr;
        bool need_rehook;
 
@@ -2808,15 +2807,13 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
 
                po->num = proto;
                po->prot_hook.type = proto;
-
-               if (po->prot_hook.dev)
-                       dev_put(po->prot_hook.dev);
-
                po->prot_hook.dev = dev;
 
                po->ifindex = dev ? dev->ifindex : 0;
                packet_cached_dev_assign(po, dev);
        }
+       if (dev_curr)
+               dev_put(dev_curr);
 
        if (proto == 0 || !need_rehook)
                goto out_unlock;
@@ -4044,7 +4041,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
        if (closing && (po->tp_version > TPACKET_V2)) {
                /* Because we don't support block-based V3 on tx-ring */
                if (!tx_ring)
-                       prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
+                       prb_shutdown_retire_blk_timer(po, rb_queue);
        }
        release_sock(sk);
 
index 273b8bff6ba448aa013932f5ac7c9f929f49aa70..657ba9f5d30862a1f8add1302e8364bcd4ad44dc 100644 (file)
@@ -759,8 +759,10 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
        }
 
        ibmr = rds_ib_alloc_fmr(rds_ibdev);
-       if (IS_ERR(ibmr))
+       if (IS_ERR(ibmr)) {
+               rds_ib_dev_put(rds_ibdev);
                return ibmr;
+       }
 
        ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
        if (ret == 0)
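
A standalone sketch of the reference-counting rule this hunk restores: every exit path taken after a get must pair with a put, including the allocation-failure path. The helper names are hypothetical userspace stand-ins for the RDS functions.

#include <stdio.h>
#include <stdlib.h>

struct ibdev { int refcnt; };

static void dev_get(struct ibdev *d) { d->refcnt++; }
static void dev_put(struct ibdev *d) { d->refcnt--; }

/* Hypothetical allocator standing in for rds_ib_alloc_fmr(). */
static void *alloc_fmr(int fail) { return fail ? NULL : malloc(1); }

/* The hunk above adds exactly this put on the failure path. */
static void *get_mr(struct ibdev *d, int fail)
{
        void *mr;

        dev_get(d);
        mr = alloc_fmr(fail);
        if (!mr) {
                dev_put(d);     /* drop the ref before bailing out */
                return NULL;
        }
        return mr;              /* caller owns mr and the device ref */
}

int main(void)
{
        struct ibdev d = { 0 };

        free(get_mr(&d, 0));
        get_mr(&d, 1);
        printf("refcnt=%d\n", d.refcnt); /* 1: only the success holds a ref */
        return 0;
}
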
index af427a3dbcba238103169ab2a58005feda5fa2f1..b087087ccfa94a47b96f9778342d4d021cf4ed1a 100644 (file)
 #include <net/act_api.h>
 #include <net/netlink.h>
 
+static void free_tcf(struct rcu_head *head)
+{
+       struct tcf_common *p = container_of(head, struct tcf_common, tcfc_rcu);
+
+       free_percpu(p->cpu_bstats);
+       free_percpu(p->cpu_qstats);
+       kfree(p);
+}
+
 void tcf_hash_destroy(struct tc_action *a)
 {
        struct tcf_common *p = a->priv;
@@ -41,11 +50,11 @@ void tcf_hash_destroy(struct tc_action *a)
         * gen_estimator est_timer() might access p->tcfc_lock
         * or bstats, wait a RCU grace period before freeing p
         */
-       kfree_rcu(p, tcfc_rcu);
+       call_rcu(&p->tcfc_rcu, free_tcf);
 }
 EXPORT_SYMBOL(tcf_hash_destroy);
 
-int tcf_hash_release(struct tc_action *a, int bind)
+int __tcf_hash_release(struct tc_action *a, bool bind, bool strict)
 {
        struct tcf_common *p = a->priv;
        int ret = 0;
@@ -53,7 +62,7 @@ int tcf_hash_release(struct tc_action *a, int bind)
        if (p) {
                if (bind)
                        p->tcfc_bindcnt--;
-               else if (p->tcfc_bindcnt > 0)
+               else if (strict && p->tcfc_bindcnt > 0)
                        return -EPERM;
 
                p->tcfc_refcnt--;
@@ -64,9 +73,10 @@ int tcf_hash_release(struct tc_action *a, int bind)
                        ret = 1;
                }
        }
+
        return ret;
 }
-EXPORT_SYMBOL(tcf_hash_release);
+EXPORT_SYMBOL(__tcf_hash_release);
 
 static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
                           struct tc_action *a)
@@ -136,7 +146,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
                head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
                hlist_for_each_entry_safe(p, n, head, tcfc_head) {
                        a->priv = p;
-                       ret = tcf_hash_release(a, 0);
+                       ret = __tcf_hash_release(a, false, true);
                        if (ret == ACT_P_DELETED) {
                                module_put(a->ops->owner);
                                n_i++;
@@ -230,15 +240,16 @@ void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est)
        if (est)
                gen_kill_estimator(&pc->tcfc_bstats,
                                   &pc->tcfc_rate_est);
-       kfree_rcu(pc, tcfc_rcu);
+       call_rcu(&pc->tcfc_rcu, free_tcf);
 }
 EXPORT_SYMBOL(tcf_hash_cleanup);
 
 int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
-                   int size, int bind)
+                   int size, int bind, bool cpustats)
 {
        struct tcf_hashinfo *hinfo = a->ops->hinfo;
        struct tcf_common *p = kzalloc(size, GFP_KERNEL);
+       int err = -ENOMEM;
 
        if (unlikely(!p))
                return -ENOMEM;
@@ -246,18 +257,32 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
        if (bind)
                p->tcfc_bindcnt = 1;
 
+       if (cpustats) {
+               p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
+               if (!p->cpu_bstats) {
+err1:
+                       kfree(p);
+                       return err;
+               }
+               p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
+               if (!p->cpu_qstats) {
+err2:
+                       free_percpu(p->cpu_bstats);
+                       goto err1;
+               }
+       }
        spin_lock_init(&p->tcfc_lock);
        INIT_HLIST_NODE(&p->tcfc_head);
        p->tcfc_index = index ? index : tcf_hash_new_index(hinfo);
        p->tcfc_tm.install = jiffies;
        p->tcfc_tm.lastuse = jiffies;
        if (est) {
-               int err = gen_new_estimator(&p->tcfc_bstats, NULL,
-                                           &p->tcfc_rate_est,
-                                           &p->tcfc_lock, est);
+               err = gen_new_estimator(&p->tcfc_bstats, p->cpu_bstats,
+                                       &p->tcfc_rate_est,
+                                       &p->tcfc_lock, est);
                if (err) {
-                       kfree(p);
-                       return err;
+                       free_percpu(p->cpu_qstats);
+                       goto err2;
                }
        }
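
A conventional goto-unwind rendering of the two-stage per-CPU allocation added above, with userspace malloc() standing in for netdev_alloc_pcpu_stats()/alloc_percpu(); a sketch under those assumptions only, names hypothetical.

#include <stdlib.h>

struct common { void *cpu_bstats; void *cpu_qstats; };

/* Each later allocation failure unwinds every earlier success. */
static struct common *common_alloc(void)
{
        struct common *p = calloc(1, sizeof(*p));

        if (!p)
                return NULL;
        p->cpu_bstats = malloc(64);
        if (!p->cpu_bstats)
                goto err_free_p;
        p->cpu_qstats = malloc(64);
        if (!p->cpu_qstats)
                goto err_free_bstats;
        return p;

err_free_bstats:
        free(p->cpu_bstats);
err_free_p:
        free(p);
        return NULL;
}

int main(void)
{
        struct common *p = common_alloc();

        if (p) {
                free(p->cpu_qstats);
                free(p->cpu_bstats);
                free(p);
        }
        return 0;
}
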
 
@@ -408,7 +433,7 @@ int tcf_action_destroy(struct list_head *actions, int bind)
        int ret = 0;
 
        list_for_each_entry_safe(a, tmp, actions, list) {
-               ret = tcf_hash_release(a, bind);
+               ret = __tcf_hash_release(a, bind, true);
                if (ret == ACT_P_DELETED)
                        module_put(a->ops->owner);
                else if (ret < 0)
@@ -615,10 +640,10 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
        if (err < 0)
                goto errout;
 
-       if (gnet_stats_copy_basic(&d, NULL, &p->tcfc_bstats) < 0 ||
+       if (gnet_stats_copy_basic(&d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
            gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
                                     &p->tcfc_rate_est) < 0 ||
-           gnet_stats_copy_queue(&d, NULL,
+           gnet_stats_copy_queue(&d, p->cpu_qstats,
                                  &p->tcfc_qstats,
                                  p->tcfc_qstats.qlen) < 0)
                goto errout;
index 1d56903fd4c79aa008c4c540aabd8b4c099e81a1..1b97dabc621a59f3772738c3345e6f1bb5185958 100644 (file)
 struct tcf_bpf_cfg {
        struct bpf_prog *filter;
        struct sock_filter *bpf_ops;
-       char *bpf_name;
+       const char *bpf_name;
        u32 bpf_fd;
        u16 bpf_num_ops;
+       bool is_ebpf;
 };
 
 static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
@@ -207,6 +208,7 @@ static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
        cfg->bpf_ops = bpf_ops;
        cfg->bpf_num_ops = bpf_num_ops;
        cfg->filter = fp;
+       cfg->is_ebpf = false;
 
        return 0;
 }
@@ -241,20 +243,42 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
        cfg->bpf_fd = bpf_fd;
        cfg->bpf_name = name;
        cfg->filter = fp;
+       cfg->is_ebpf = true;
 
        return 0;
 }
 
+static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
+{
+       if (cfg->is_ebpf)
+               bpf_prog_put(cfg->filter);
+       else
+               bpf_prog_destroy(cfg->filter);
+
+       kfree(cfg->bpf_ops);
+       kfree(cfg->bpf_name);
+}
+
+static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
+                                 struct tcf_bpf_cfg *cfg)
+{
+       cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
+       cfg->filter = prog->filter;
+
+       cfg->bpf_ops = prog->bpf_ops;
+       cfg->bpf_name = prog->bpf_name;
+}
+
 static int tcf_bpf_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action *act,
                        int replace, int bind)
 {
        struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
+       struct tcf_bpf_cfg cfg, old;
        struct tc_act_bpf *parm;
        struct tcf_bpf *prog;
-       struct tcf_bpf_cfg cfg;
        bool is_bpf, is_ebpf;
-       int ret;
+       int ret, res = 0;
 
        if (!nla)
                return -EINVAL;
@@ -263,44 +287,49 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
        if (ret < 0)
                return ret;
 
-       is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
-       is_ebpf = tb[TCA_ACT_BPF_FD];
-
-       if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf) ||
-           !tb[TCA_ACT_BPF_PARMS])
+       if (!tb[TCA_ACT_BPF_PARMS])
                return -EINVAL;
 
        parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
 
-       memset(&cfg, 0, sizeof(cfg));
-
-       ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
-                      tcf_bpf_init_from_efd(tb, &cfg);
-       if (ret < 0)
-               return ret;
-
        if (!tcf_hash_check(parm->index, act, bind)) {
                ret = tcf_hash_create(parm->index, est, act,
-                                     sizeof(*prog), bind);
+                                     sizeof(*prog), bind, false);
                if (ret < 0)
-                       goto destroy_fp;
+                       return ret;
 
-               ret = ACT_P_CREATED;
+               res = ACT_P_CREATED;
        } else {
                /* Don't override defaults. */
                if (bind)
-                       goto destroy_fp;
+                       return 0;
 
                tcf_hash_release(act, bind);
-               if (!replace) {
-                       ret = -EEXIST;
-                       goto destroy_fp;
-               }
+               if (!replace)
+                       return -EEXIST;
+       }
+
+       is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
+       is_ebpf = tb[TCA_ACT_BPF_FD];
+
+       if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
+               ret = -EINVAL;
+               goto out;
        }
 
+       memset(&cfg, 0, sizeof(cfg));
+
+       ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
+                      tcf_bpf_init_from_efd(tb, &cfg);
+       if (ret < 0)
+               goto out;
+
        prog = to_bpf(act);
        spin_lock_bh(&prog->tcf_lock);
 
+       if (ret != ACT_P_CREATED)
+               tcf_bpf_prog_fill_cfg(prog, &old);
+
        prog->bpf_ops = cfg.bpf_ops;
        prog->bpf_name = cfg.bpf_name;
 
@@ -314,31 +343,25 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
 
        spin_unlock_bh(&prog->tcf_lock);
 
-       if (ret == ACT_P_CREATED)
+       if (res == ACT_P_CREATED)
                tcf_hash_insert(act);
-
-       return ret;
-
-destroy_fp:
-       if (is_ebpf)
-               bpf_prog_put(cfg.filter);
        else
-               bpf_prog_destroy(cfg.filter);
+               tcf_bpf_cfg_cleanup(&old);
 
-       kfree(cfg.bpf_ops);
-       kfree(cfg.bpf_name);
+       return res;
+out:
+       if (res == ACT_P_CREATED)
+               tcf_hash_cleanup(act, est);
 
        return ret;
 }
 
 static void tcf_bpf_cleanup(struct tc_action *act, int bind)
 {
-       const struct tcf_bpf *prog = act->priv;
+       struct tcf_bpf_cfg tmp;
 
-       if (tcf_bpf_is_ebpf(prog))
-               bpf_prog_put(prog->filter);
-       else
-               bpf_prog_destroy(prog->filter);
+       tcf_bpf_prog_fill_cfg(act->priv, &tmp);
+       tcf_bpf_cfg_cleanup(&tmp);
 }
 
 static struct tc_action_ops act_bpf_ops __read_mostly = {
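
A minimal sketch of the replace pattern introduced above: snapshot the old configuration inside the critical section, install the new one, then release the old resources after the lock is dropped. Locking is elided and all names are hypothetical stand-ins for the tcf_bpf_prog_fill_cfg()/tcf_bpf_cfg_cleanup() pair.

#include <stdlib.h>
#include <string.h>

struct cfg { void *filter; char *name; };
struct prog { struct cfg cur; /* spinlock elided in this sketch */ };

static void prog_replace(struct prog *p, const struct cfg *newcfg)
{
        struct cfg old;

        /* lock(p) */
        old = p->cur;           /* fill 'old' from the live program */
        p->cur = *newcfg;       /* publish the new config */
        /* unlock(p) */

        free(old.filter);       /* cleanup runs outside the lock */
        free(old.name);
}

int main(void)
{
        struct prog p = { { malloc(8), strdup("old") } };
        struct cfg n = { malloc(8), strdup("new") };

        prog_replace(&p, &n);
        free(p.cur.filter);
        free(p.cur.name);
        return 0;
}
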
index 295d14bd6c678c31b56219371df83d4ebe3b0a2c..f2b540220ad02f1f8e3b2add9c7477a334081c3d 100644 (file)
@@ -108,7 +108,8 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
        parm = nla_data(tb[TCA_CONNMARK_PARMS]);
 
        if (!tcf_hash_check(parm->index, a, bind)) {
-               ret = tcf_hash_create(parm->index, est, a, sizeof(*ci), bind);
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*ci),
+                                     bind, false);
                if (ret)
                        return ret;
 
index 4cd5cf1aedf8b14bc8a8fb0529db868ee74433fd..b07c535ba8e7c6f8dcbc52f4eb69cf4a1ab3d0c2 100644 (file)
@@ -62,7 +62,8 @@ static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
        parm = nla_data(tb[TCA_CSUM_PARMS]);
 
        if (!tcf_hash_check(parm->index, a, bind)) {
-               ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
+                                     bind, false);
                if (ret)
                        return ret;
                ret = ACT_P_CREATED;
index 7fffc2272701adb42d109318a80e8fef6fc3289f..5c1b051707363e19a779fcca2de6ae38ba0239fe 100644 (file)
 #ifdef CONFIG_GACT_PROB
 static int gact_net_rand(struct tcf_gact *gact)
 {
-       if (!gact->tcfg_pval || prandom_u32() % gact->tcfg_pval)
+       smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
+       if (prandom_u32() % gact->tcfg_pval)
                return gact->tcf_action;
        return gact->tcfg_paction;
 }
 
 static int gact_determ(struct tcf_gact *gact)
 {
-       if (!gact->tcfg_pval || gact->tcf_bstats.packets % gact->tcfg_pval)
+       u32 pack = atomic_inc_return(&gact->packets);
+
+       smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
+       if (pack % gact->tcfg_pval)
                return gact->tcf_action;
        return gact->tcfg_paction;
 }
@@ -85,7 +89,8 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
 #endif
 
        if (!tcf_hash_check(parm->index, a, bind)) {
-               ret = tcf_hash_create(parm->index, est, a, sizeof(*gact), bind);
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*gact),
+                                     bind, true);
                if (ret)
                        return ret;
                ret = ACT_P_CREATED;
@@ -99,16 +104,19 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
 
        gact = to_gact(a);
 
-       spin_lock_bh(&gact->tcf_lock);
+       ASSERT_RTNL();
        gact->tcf_action = parm->action;
 #ifdef CONFIG_GACT_PROB
        if (p_parm) {
                gact->tcfg_paction = p_parm->paction;
-               gact->tcfg_pval    = p_parm->pval;
+               gact->tcfg_pval    = max_t(u16, 1, p_parm->pval);
+               /* Make sure tcfg_pval is written before tcfg_ptype
+                * coupled with smp_rmb() in gact_net_rand() & gact_determ()
+                */
+               smp_wmb();
                gact->tcfg_ptype   = p_parm->ptype;
        }
 #endif
-       spin_unlock_bh(&gact->tcf_lock);
        if (ret == ACT_P_CREATED)
                tcf_hash_insert(a);
        return ret;
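
A runnable userspace analogue of the ordering fix above, using C11 release/acquire in place of smp_wmb()/smp_rmb(): the divisor is written before the type flag that readers key off, so a reader that observes a non-zero type never divides by zero (mirroring the max_t(u16, 1, ...) clamp).

#include <stdatomic.h>
#include <stdio.h>

static unsigned int pval;               /* plain data, like tcfg_pval */
static _Atomic unsigned int ptype;      /* publication flag, like tcfg_ptype */

static void writer(unsigned int v, unsigned int t)
{
        pval = v ? v : 1;       /* clamp, mirroring max_t(u16, 1, pval) */
        /* release orders the pval store before the ptype store,
         * like smp_wmb() in tcf_gact_init() */
        atomic_store_explicit(&ptype, t, memory_order_release);
}

static unsigned int reader(unsigned int sample)
{
        /* acquire pairs with the release above, like smp_rmb() */
        unsigned int t = atomic_load_explicit(&ptype, memory_order_acquire);

        if (t)
                return sample % pval;   /* pval is non-zero once t is seen */
        return 0;
}

int main(void)
{
        writer(7, 1);
        printf("%u\n", reader(12345)); /* 12345 mod 7 == 4 */
        return 0;
}
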
@@ -118,23 +126,21 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
                    struct tcf_result *res)
 {
        struct tcf_gact *gact = a->priv;
-       int action = TC_ACT_SHOT;
+       int action = READ_ONCE(gact->tcf_action);
 
-       spin_lock(&gact->tcf_lock);
 #ifdef CONFIG_GACT_PROB
-       if (gact->tcfg_ptype)
-               action = gact_rand[gact->tcfg_ptype](gact);
-       else
-               action = gact->tcf_action;
-#else
-       action = gact->tcf_action;
+       {
+       u32 ptype = READ_ONCE(gact->tcfg_ptype);
+
+       if (ptype)
+               action = gact_rand[ptype](gact);
+       }
 #endif
-       gact->tcf_bstats.bytes += qdisc_pkt_len(skb);
-       gact->tcf_bstats.packets++;
+       bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), skb);
        if (action == TC_ACT_SHOT)
-               gact->tcf_qstats.drops++;
-       gact->tcf_tm.lastuse = jiffies;
-       spin_unlock(&gact->tcf_lock);
+               qstats_drop_inc(this_cpu_ptr(gact->common.cpu_qstats));
+
+       tcf_lastuse_update(&gact->tcf_tm);
 
        return action;
 }
index cbc8dd7dd48a50e77fdafa7b8cf4041659995cbb..99c9cc1c7af9240f9df444ae158df4fa7f7f8c73 100644 (file)
@@ -114,7 +114,7 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
                index = nla_get_u32(tb[TCA_IPT_INDEX]);
 
        if (!tcf_hash_check(index, a, bind) ) {
-               ret = tcf_hash_create(index, est, a, sizeof(*ipt), bind);
+               ret = tcf_hash_create(index, est, a, sizeof(*ipt), bind, false);
                if (ret)
                        return ret;
                ret = ACT_P_CREATED;
index a42a3b257226178eb5af04054a17813c04368613..19cd8904efa0a46b9d659f36322a78e4cbb64c38 100644 (file)
@@ -35,9 +35,11 @@ static LIST_HEAD(mirred_list);
 static void tcf_mirred_release(struct tc_action *a, int bind)
 {
        struct tcf_mirred *m = to_mirred(a);
+       struct net_device *dev = rcu_dereference_protected(m->tcfm_dev, 1);
+
        list_del(&m->tcfm_list);
-       if (m->tcfm_dev)
-               dev_put(m->tcfm_dev);
+       if (dev)
+               dev_put(dev);
 }
 
 static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
@@ -93,7 +95,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
        if (!tcf_hash_check(parm->index, a, bind)) {
                if (dev == NULL)
                        return -EINVAL;
-               ret = tcf_hash_create(parm->index, est, a, sizeof(*m), bind);
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*m),
+                                     bind, true);
                if (ret)
                        return ret;
                ret = ACT_P_CREATED;
@@ -105,18 +108,18 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
        }
        m = to_mirred(a);
 
-       spin_lock_bh(&m->tcf_lock);
+       ASSERT_RTNL();
        m->tcf_action = parm->action;
        m->tcfm_eaction = parm->eaction;
        if (dev != NULL) {
                m->tcfm_ifindex = parm->ifindex;
                if (ret != ACT_P_CREATED)
-                       dev_put(m->tcfm_dev);
+                       dev_put(rcu_dereference_protected(m->tcfm_dev, 1));
                dev_hold(dev);
-               m->tcfm_dev = dev;
+               rcu_assign_pointer(m->tcfm_dev, dev);
                m->tcfm_ok_push = ok_push;
        }
-       spin_unlock_bh(&m->tcf_lock);
+
        if (ret == ACT_P_CREATED) {
                list_add(&m->tcfm_list, &mirred_list);
                tcf_hash_insert(a);
@@ -131,20 +134,22 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
        struct tcf_mirred *m = a->priv;
        struct net_device *dev;
        struct sk_buff *skb2;
+       int retval, err;
        u32 at;
-       int retval, err = 1;
 
-       spin_lock(&m->tcf_lock);
-       m->tcf_tm.lastuse = jiffies;
-       bstats_update(&m->tcf_bstats, skb);
+       tcf_lastuse_update(&m->tcf_tm);
+
+       bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
 
-       dev = m->tcfm_dev;
-       if (!dev) {
-               printk_once(KERN_NOTICE "tc mirred: target device is gone\n");
+       rcu_read_lock();
+       retval = READ_ONCE(m->tcf_action);
+       dev = rcu_dereference(m->tcfm_dev);
+       if (unlikely(!dev)) {
+               pr_notice_once("tc mirred: target device is gone\n");
                goto out;
        }
 
-       if (!(dev->flags & IFF_UP)) {
+       if (unlikely(!(dev->flags & IFF_UP))) {
                net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
                                       dev->name);
                goto out;
@@ -152,7 +157,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
 
        at = G_TC_AT(skb->tc_verd);
        skb2 = skb_clone(skb, GFP_ATOMIC);
-       if (skb2 == NULL)
+       if (!skb2)
                goto out;
 
        if (!(at & AT_EGRESS)) {
@@ -168,16 +173,13 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
        skb2->dev = dev;
        err = dev_queue_xmit(skb2);
 
-out:
        if (err) {
-               m->tcf_qstats.overlimits++;
+out:
+               qstats_overlimit_inc(this_cpu_ptr(m->common.cpu_qstats));
                if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
                        retval = TC_ACT_SHOT;
-               else
-                       retval = m->tcf_action;
-       } else
-               retval = m->tcf_action;
-       spin_unlock(&m->tcf_lock);
+       }
+       rcu_read_unlock();
 
        return retval;
 }
@@ -216,14 +218,16 @@ static int mirred_device_event(struct notifier_block *unused,
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct tcf_mirred *m;
 
+       ASSERT_RTNL();
        if (event == NETDEV_UNREGISTER)
                list_for_each_entry(m, &mirred_list, tcfm_list) {
-                       spin_lock_bh(&m->tcf_lock);
-                       if (m->tcfm_dev == dev) {
+                       if (rcu_access_pointer(m->tcfm_dev) == dev) {
                                dev_put(dev);
-                               m->tcfm_dev = NULL;
+                               /* Note: no RCU grace period is necessary, as
+                                * net_device objects are already RCU protected.
+                                */
+                               RCU_INIT_POINTER(m->tcfm_dev, NULL);
                        }
-                       spin_unlock_bh(&m->tcf_lock);
                }
 
        return NOTIFY_DONE;
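
The hunks above convert tcfm_dev to an RCU pointer; no grace period is needed there because net_device is already RCU-protected, as the comment notes. As a general illustration of the publish/read idiom, here is a sketch assuming the userspace RCU library (liburcu, link with -lurcu); names are hypothetical.

#include <urcu.h>       /* userspace RCU (liburcu) */
#include <stdio.h>
#include <stdlib.h>

struct target { int ifindex; };

static struct target *tgt;      /* written by updaters, read under RCU */

/* Reader side: dereference once, test for NULL, use the snapshot --
 * the same shape as the tcf_mirred() fast path above. */
static int target_ifindex(void)
{
        struct target *t;
        int idx = -1;

        rcu_read_lock();
        t = rcu_dereference(tgt);
        if (t)
                idx = t->ifindex;
        rcu_read_unlock();
        return idx;
}

/* Updater side: publish the new pointer, wait out readers, free the old. */
static void target_set(struct target *t)
{
        struct target *old = tgt;

        rcu_assign_pointer(tgt, t);
        synchronize_rcu();
        free(old);
}

int main(void)
{
        struct target *t = malloc(sizeof(*t));

        t->ifindex = 4;
        rcu_register_thread();
        target_set(t);
        printf("ifindex=%d\n", target_ifindex());
        target_set(NULL);
        rcu_unregister_thread();
        return 0;
}
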
index 270a030d5fd099ee7b6f6d74d51b6015aa690647..5be0b3c1c5b0c9f17e3fbd4e1dc1c92c7a8e5aed 100644 (file)
@@ -55,7 +55,8 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
        parm = nla_data(tb[TCA_NAT_PARMS]);
 
        if (!tcf_hash_check(parm->index, a, bind)) {
-               ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
+                                     bind, false);
                if (ret)
                        return ret;
                ret = ACT_P_CREATED;
index 17e6d6669c7fdf138915ac9549d3f06d9535d745..e38a7701f154c97db2070b1e0b8b54fabdb8b0f3 100644 (file)
@@ -57,7 +57,8 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
        if (!tcf_hash_check(parm->index, a, bind)) {
                if (!parm->nkeys)
                        return -EINVAL;
-               ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
+                                     bind, false);
                if (ret)
                        return ret;
                p = to_pedit(a);
@@ -68,13 +69,12 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
                }
                ret = ACT_P_CREATED;
        } else {
-               p = to_pedit(a);
-               tcf_hash_release(a, bind);
                if (bind)
                        return 0;
+               tcf_hash_release(a, bind);
                if (!ovr)
                        return -EEXIST;
-
+               p = to_pedit(a);
                if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
                        keys = kmalloc(ksize, GFP_KERNEL);
                        if (keys == NULL)
index 6a8d9488613a76d9cb2bd03e932ef95487ca0745..d6b708d6afdf37e7c1af4e47873755fc84b1167f 100644 (file)
@@ -103,7 +103,8 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
        defdata = nla_data(tb[TCA_DEF_DATA]);
 
        if (!tcf_hash_check(parm->index, a, bind)) {
-               ret = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*d),
+                                     bind, false);
                if (ret)
                        return ret;
 
index fcfeeaf838beb9e75f07f7cbda7fb2b73237a17f..6751b5f8c046a59912b78762855e51af8e6f29e7 100644 (file)
@@ -99,7 +99,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
        parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
 
        if (!tcf_hash_check(parm->index, a, bind)) {
-               ret = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*d),
+                                     bind, false);
                if (ret)
                        return ret;
 
index d735ecf0b1a78d3fac6ac80b95931cc6cf6caba0..796785e0bf96b0e65f598d3b2dad8256485d034a 100644 (file)
@@ -116,7 +116,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
        action = parm->v_action;
 
        if (!tcf_hash_check(parm->index, a, bind)) {
-               ret = tcf_hash_create(parm->index, est, a, sizeof(*v), bind);
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*v),
+                                     bind, false);
                if (ret)
                        return ret;
 
index c79ecfd36e0f028388ea5f96a64dbb23451b01b1..e5168f8b9640964ce2dd95a896a24f6c986a959a 100644 (file)
@@ -378,7 +378,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                goto errout;
 
        if (oldprog) {
-               list_replace_rcu(&prog->link, &oldprog->link);
+               list_replace_rcu(&oldprog->link, &prog->link);
                tcf_unbind_filter(tp, &oldprog->res);
                call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
        } else {
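
The one-line fix above swaps the arguments back into the order list_replace_rcu() expects: old entry first, new entry second. A minimal non-RCU analogue showing why the order matters (the new node must be spliced into the old node's position, not the other way around):

#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

/* Analogue of the kernel's list_replace(): 'new' takes the list
 * position of 'old'. The same argument order is restored by the
 * matching cls_flow and cls_flower fixes below. */
static void list_replace(struct list_head *old, struct list_head *new)
{
        new->next = old->next;
        new->next->prev = new;
        new->prev = old->prev;
        new->prev->next = new;
}

int main(void)
{
        struct list_head head, a, b;

        head.next = &a; head.prev = &a;
        a.next = &head; a.prev = &head;

        list_replace(&a, &b);
        printf("%s\n", head.next == &b ? "b linked" : "broken");
        return 0;
}
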
index ea611b21641241737223f34334c0189df00d11e7..4c85bd3a750cbb02c743779f28cbde6ceacb5ecf 100644 (file)
@@ -30,35 +30,16 @@ static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                               struct tcf_result *res)
 {
        struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);
-       u32 classid;
-
-       classid = task_cls_state(current)->classid;
-
-       /*
-        * Due to the nature of the classifier it is required to ignore all
-        * packets originating from softirq context as accessing `current'
-        * would lead to false results.
-        *
-        * This test assumes that all callers of dev_queue_xmit() explicitly
-        * disable bh. Knowing this, it is possible to detect softirq based
-        * calls by looking at the number of nested bh disable calls because
-        * softirqs always disable bh.
-        */
-       if (in_serving_softirq()) {
-               /* If there is an sk_classid we'll use that. */
-               if (!skb->sk)
-                       return -1;
-               classid = skb->sk->sk_classid;
-       }
+       u32 classid = task_get_classid(skb);
 
        if (!classid)
                return -1;
-
        if (!tcf_em_tree_match(skb, &head->ematches, NULL))
                return -1;
 
        res->classid = classid;
        res->class = 0;
+
        return tcf_exts_exec(skb, &head->exts, res);
 }
 
index 76bc3a20ffdb31bb4c9b51942de74c64928c2a3a..bb2a0f529c1f519f79f22ffa046013d7ac863eb5 100644 (file)
@@ -425,6 +425,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
        if (!fnew)
                goto err2;
 
+       tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
+
        fold = (struct flow_filter *)*arg;
        if (fold) {
                err = -EINVAL;
@@ -486,7 +488,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
                fnew->mask  = ~0U;
                fnew->tp = tp;
                get_random_bytes(&fnew->hashrnd, 4);
-               tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
        }
 
        fnew->perturb_timer.function = flow_perturbation;
@@ -526,7 +527,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
        if (*arg == 0)
                list_add_tail_rcu(&fnew->list, &head->filters);
        else
-               list_replace_rcu(&fnew->list, &fold->list);
+               list_replace_rcu(&fold->list, &fnew->list);
 
        *arg = (unsigned long)fnew;
 
index 9d37ccd95062a6840d1bb1e140b173dd1fe0b9d0..2f3d03f99487ed35c4af1e5c7ede590e4b0e3721 100644 (file)
@@ -499,7 +499,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
        *arg = (unsigned long) fnew;
 
        if (fold) {
-               list_replace_rcu(&fnew->list, &fold->list);
+               list_replace_rcu(&fold->list, &fnew->list);
                tcf_unbind_filter(tp, &fold->res);
                call_rcu(&fold->rcu, fl_destroy_filter);
        } else {
index 93d5742dc7e0f9730abd1726adf47857de1e64d4..6a783afe4960052912bf1241a22ab16d38d9e81a 100644 (file)
@@ -385,6 +385,19 @@ static void choke_reset(struct Qdisc *sch)
 {
        struct choke_sched_data *q = qdisc_priv(sch);
 
+       while (q->head != q->tail) {
+               struct sk_buff *skb = q->tab[q->head];
+
+               q->head = (q->head + 1) & q->tab_mask;
+               if (!skb)
+                       continue;
+               qdisc_qstats_backlog_dec(sch, skb);
+               --sch->q.qlen;
+               qdisc_drop(skb, sch);
+       }
+
+       memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
+       q->head = q->tail = 0;
        red_restart(&q->vars);
 }
 
index d75993f89facc0ce8d5df0d26aedcd016714a43e..21ca33c9f0368b21cdb00fbdbbca4851c2ad87a2 100644 (file)
@@ -155,14 +155,23 @@ static unsigned int fq_codel_drop(struct Qdisc *sch)
        skb = dequeue_head(flow);
        len = qdisc_pkt_len(skb);
        q->backlogs[idx] -= len;
-       kfree_skb(skb);
        sch->q.qlen--;
        qdisc_qstats_drop(sch);
        qdisc_qstats_backlog_dec(sch, skb);
+       kfree_skb(skb);
        flow->dropped++;
        return idx;
 }
 
+static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
+{
+       unsigned int prev_backlog;
+
+       prev_backlog = sch->qstats.backlog;
+       fq_codel_drop(sch);
+       return prev_backlog - sch->qstats.backlog;
+}
+
 static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
        struct fq_codel_sched_data *q = qdisc_priv(sch);
@@ -604,7 +613,7 @@ static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
        .enqueue        =       fq_codel_enqueue,
        .dequeue        =       fq_codel_dequeue,
        .peek           =       qdisc_peek_dequeued,
-       .drop           =       fq_codel_drop,
+       .drop           =       fq_codel_qdisc_drop,
        .init           =       fq_codel_init,
        .reset          =       fq_codel_reset,
        .destroy        =       fq_codel_destroy,
index 89f8fcf73f18f6e091881cc829861285c0c8f8b7..ade9445a55abe468107f2bcb233e48da027afb4b 100644 (file)
@@ -216,6 +216,7 @@ static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
        .peek        =       qdisc_peek_head,
        .init        =       plug_init,
        .change      =       plug_change,
+       .reset       =       qdisc_reset_queue,
        .owner       =       THIS_MODULE,
 };
 
index b8d73bca683cc25d946163d9e856d8ed332ad4d7..ffaeea63d47381c480a7c97e00c64c7a4eb6e80d 100644 (file)
@@ -186,7 +186,6 @@ struct qfq_sched {
 
        u64                     oldV, V;        /* Precise virtual times. */
        struct qfq_aggregate    *in_serv_agg;   /* Aggregate being served. */
-       u32                     num_active_agg; /* Num. of active aggregates */
        u32                     wsum;           /* weight sum */
        u32                     iwsum;          /* inverse weight sum */
 
index 7d14926633601b85c2d281d914fa978c8a038e10..52f75a5473e120f8d0a02a2ff1c0215ff02437b5 100644 (file)
@@ -306,10 +306,10 @@ drop:
                len = qdisc_pkt_len(skb);
                slot->backlog -= len;
                sfq_dec(q, x);
-               kfree_skb(skb);
                sch->q.qlen--;
                qdisc_qstats_drop(sch);
                qdisc_qstats_backlog_dec(sch, skb);
+               kfree_skb(skb);
                return len;
        }
 
index 59e80356672bdf89777265ae1f8c384792dfb98c..4345790ad3266c353eeac5398593c2a9ce4effda 100644 (file)
@@ -487,23 +487,35 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
         */
        rcu_read_lock();
        list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+               struct net_device *odev;
+
                if (!laddr->valid)
                        continue;
-               if ((laddr->state == SCTP_ADDR_SRC) &&
-                   (AF_INET == laddr->a.sa.sa_family)) {
-                       fl4->fl4_sport = laddr->a.v4.sin_port;
-                       flowi4_update_output(fl4,
-                                            asoc->base.sk->sk_bound_dev_if,
-                                            RT_CONN_FLAGS(asoc->base.sk),
-                                            daddr->v4.sin_addr.s_addr,
-                                            laddr->a.v4.sin_addr.s_addr);
-
-                       rt = ip_route_output_key(sock_net(sk), fl4);
-                       if (!IS_ERR(rt)) {
-                               dst = &rt->dst;
-                               goto out_unlock;
-                       }
-               }
+               if (laddr->state != SCTP_ADDR_SRC ||
+                   AF_INET != laddr->a.sa.sa_family)
+                       continue;
+
+               fl4->fl4_sport = laddr->a.v4.sin_port;
+               flowi4_update_output(fl4,
+                                    asoc->base.sk->sk_bound_dev_if,
+                                    RT_CONN_FLAGS(asoc->base.sk),
+                                    daddr->v4.sin_addr.s_addr,
+                                    laddr->a.v4.sin_addr.s_addr);
+
+               rt = ip_route_output_key(sock_net(sk), fl4);
+               if (IS_ERR(rt))
+                       continue;
+
+               /* Ensure the src address belongs to the output
+                * interface.
+                */
+               odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
+                                    false);
+               if (!odev || odev->ifindex != fl4->flowi4_oif)
+                       continue;
+
+               dst = &rt->dst;
+               break;
        }
 
 out_unlock:
index 3ee27b7704ffb95430541507e83973e9207f9672..d7eaa7354cf76148d1a2c9ee3af4fff9a24990fb 100644 (file)
@@ -853,7 +853,7 @@ nomem:
 
 /*
  * Respond to a normal COOKIE ACK chunk.
- * We are the side that is being asked for an association.
+ * We are the side that is asking for an association.
  *
  * RFC 2960 5.1 Normal Establishment of an Association
  *
index 1425ec2bbd5ae359a8e0408a89a6da6bb60bd87e..17bef01b9aa3e7f75328d39fc976f9e80d641e92 100644 (file)
@@ -2200,12 +2200,6 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
        if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
                return -EFAULT;
 
-       if (sctp_sk(sk)->subscribe.sctp_data_io_event)
-               pr_warn_ratelimited(DEPRECATED "%s (pid %d) "
-                                   "Requested SCTP_SNDRCVINFO event.\n"
-                                   "Use SCTP_RCVINFO through SCTP_RECVRCVINFO option instead.\n",
-                                   current->comm, task_pid_nr(current));
-
        /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
         * if there is no data to be sent or retransmit, the stack will
         * immediately send up this notification.
index 9825ff0f91d6c0bde819105f639cae21883bbfad..6255d141133bb0ccabc195c8b4d9bce8cbe39c51 100644 (file)
@@ -240,8 +240,8 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
                req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
                if (!req)
                        goto not_found;
-               /* Note: this 'free' request adds it to xprt->bc_pa_list */
-               xprt_free_bc_request(req);
+               list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
+               xprt->bc_alloc_count++;
        }
        req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
                                rq_bc_pa_list);
@@ -336,7 +336,7 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
 
        spin_lock(&xprt->bc_pa_lock);
        list_del(&req->rq_bc_pa_list);
-       xprt->bc_alloc_count--;
+       xprt_dec_alloc_count(xprt, 1);
        spin_unlock(&xprt->bc_pa_lock);
 
        req->rq_private_buf.len = copied;
index cbc6af923dd1cb0baabc95161989133150269d4f..23608eb0ded2c94e363ed06e35384b08417e04ed 100644 (file)
@@ -1902,6 +1902,7 @@ call_transmit_status(struct rpc_task *task)
 
        switch (task->tk_status) {
        case -EAGAIN:
+       case -ENOBUFS:
                break;
        default:
                dprint_status(task);
@@ -1928,7 +1929,6 @@ call_transmit_status(struct rpc_task *task)
        case -ECONNABORTED:
        case -EADDRINUSE:
        case -ENOTCONN:
-       case -ENOBUFS:
        case -EPIPE:
                rpc_task_force_reencode(task);
        }
@@ -2057,12 +2057,13 @@ call_status(struct rpc_task *task)
        case -ECONNABORTED:
                rpc_force_rebind(clnt);
        case -EADDRINUSE:
-       case -ENOBUFS:
                rpc_delay(task, 3*HZ);
        case -EPIPE:
        case -ENOTCONN:
                task->tk_action = call_bind;
                break;
+       case -ENOBUFS:
+               rpc_delay(task, HZ>>2);
        case -EAGAIN:
                task->tk_action = call_transmit;
                break;
index e193c2b5476b3a83973e9799e2e826fdcd2b842c..0030376327b77f0a08d4af887286a0616a60069e 100644 (file)
@@ -527,6 +527,10 @@ static int xs_local_send_request(struct rpc_task *task)
                              true, &sent);
        dprintk("RPC:       %s(%u) = %d\n",
                        __func__, xdr->len - req->rq_bytes_sent, status);
+
+       if (status == -EAGAIN && sock_writeable(transport->inet))
+               status = -ENOBUFS;
+
        if (likely(sent > 0) || status == 0) {
                req->rq_bytes_sent += sent;
                req->rq_xmit_bytes_sent += sent;
@@ -539,6 +543,7 @@ static int xs_local_send_request(struct rpc_task *task)
 
        switch (status) {
        case -ENOBUFS:
+               break;
        case -EAGAIN:
                status = xs_nospace(task);
                break;
@@ -589,6 +594,9 @@ static int xs_udp_send_request(struct rpc_task *task)
        if (status == -EPERM)
                goto process_status;
 
+       if (status == -EAGAIN && sock_writeable(transport->inet))
+               status = -ENOBUFS;
+
        if (sent > 0 || status == 0) {
                req->rq_xmit_bytes_sent += sent;
                if (sent >= req->rq_slen)
@@ -669,9 +677,6 @@ static int xs_tcp_send_request(struct rpc_task *task)
                dprintk("RPC:       xs_tcp_send_request(%u) = %d\n",
                                xdr->len - req->rq_bytes_sent, status);
 
-               if (unlikely(sent == 0 && status < 0))
-                       break;
-
                /* If we've sent the entire packet, immediately
                 * reset the count of bytes sent. */
                req->rq_bytes_sent += sent;
@@ -681,18 +686,21 @@ static int xs_tcp_send_request(struct rpc_task *task)
                        return 0;
                }
 
-               if (sent != 0)
-                       continue;
-               status = -EAGAIN;
-               break;
+               if (status < 0)
+                       break;
+               if (sent == 0) {
+                       status = -EAGAIN;
+                       break;
+               }
        }
+       if (status == -EAGAIN && sk_stream_is_writeable(transport->inet))
+               status = -ENOBUFS;
 
        switch (status) {
        case -ENOTSOCK:
                status = -ENOTCONN;
                /* Should we call xs_close() here? */
                break;
-       case -ENOBUFS:
        case -EAGAIN:
                status = xs_nospace(task);
                break;
@@ -703,6 +711,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
        case -ECONNREFUSED:
        case -ENOTCONN:
        case -EADDRINUSE:
+       case -ENOBUFS:
        case -EPIPE:
                clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
        }
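
The pattern above maps EAGAIN to ENOBUFS when the socket is still writable, i.e. the stall is memory pressure rather than a full send queue, so the RPC layer backs off briefly instead of waiting for write space. A tiny standalone rendering of that classification (the helper name is hypothetical):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int classify_send_error(int status, bool writable)
{
        /* EAGAIN on a writable socket means no buffer memory, not
         * "wait for POLLOUT": report it as ENOBUFS instead. */
        if (status == -EAGAIN && writable)
                return -ENOBUFS;
        return status;
}

int main(void)
{
        printf("%d %d\n", classify_send_error(-EAGAIN, true),
               classify_send_error(-EAGAIN, false));
        return 0;
}
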
index 9f2add3cba26e54eadc15aeea05c3db167a75665..33bafa2e703e299f3b423d1f95b2a21cf177c634 100644 (file)
@@ -910,13 +910,9 @@ static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
                if (switchdev_port_attr_get(dev, &attr))
                        return NULL;
 
-               if (nhsel > 0) {
-                       if (prev_attr.u.ppid.id_len != attr.u.ppid.id_len)
+               if (nhsel > 0 &&
+                   !netdev_phys_item_id_same(&prev_attr.u.ppid, &attr.u.ppid))
                                return NULL;
-                       if (memcmp(prev_attr.u.ppid.id, attr.u.ppid.id,
-                                  attr.u.ppid.id_len))
-                               return NULL;
-               }
 
                prev_attr = attr;
        }
@@ -1043,3 +1039,106 @@ void switchdev_fib_ipv4_abort(struct fib_info *fi)
        fi->fib_net->ipv4.fib_offload_disabled = true;
 }
 EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);
+
+static bool switchdev_port_same_parent_id(struct net_device *a,
+                                         struct net_device *b)
+{
+       struct switchdev_attr a_attr = {
+               .id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+               .flags = SWITCHDEV_F_NO_RECURSE,
+       };
+       struct switchdev_attr b_attr = {
+               .id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+               .flags = SWITCHDEV_F_NO_RECURSE,
+       };
+
+       if (switchdev_port_attr_get(a, &a_attr) ||
+           switchdev_port_attr_get(b, &b_attr))
+               return false;
+
+       return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
+}
+
+static u32 switchdev_port_fwd_mark_get(struct net_device *dev,
+                                      struct net_device *group_dev)
+{
+       struct net_device *lower_dev;
+       struct list_head *iter;
+
+       netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
+               if (lower_dev == dev)
+                       continue;
+               if (switchdev_port_same_parent_id(dev, lower_dev))
+                       return lower_dev->offload_fwd_mark;
+               return switchdev_port_fwd_mark_get(dev, lower_dev);
+       }
+
+       return dev->ifindex;
+}
+
+static void switchdev_port_fwd_mark_reset(struct net_device *group_dev,
+                                         u32 old_mark, u32 *reset_mark)
+{
+       struct net_device *lower_dev;
+       struct list_head *iter;
+
+       netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
+               if (lower_dev->offload_fwd_mark == old_mark) {
+                       if (!*reset_mark)
+                               *reset_mark = lower_dev->ifindex;
+                       lower_dev->offload_fwd_mark = *reset_mark;
+               }
+               switchdev_port_fwd_mark_reset(lower_dev, old_mark, reset_mark);
+       }
+}
+
+/**
+ *     switchdev_port_fwd_mark_set - Set port offload forwarding mark
+ *
+ *     @dev: port device
+ *     @group_dev: containing device
+ *     @joining: true if dev is joining group; false if leaving group
+ *
+ *     An ungrouped port's offload mark is just its ifindex.  A grouped
+ *     port's (member of a bridge, for example) offload mark is the ifindex
+ *     of one of the ports in the group with the same parent (switch) ID.
+ *     Ports on the same device in the same group will have the same mark.
+ *
+ *     Example:
+ *
+ *             br0             ifindex=9
+ *               sw1p1         ifindex=2       mark=2
+ *               sw1p2         ifindex=3       mark=2
+ *               sw2p1         ifindex=4       mark=5
+ *               sw2p2         ifindex=5       mark=5
+ *
+ *     If sw2p2 leaves the bridge, we'll have:
+ *
+ *             br0             ifindex=9
+ *               sw1p1         ifindex=2       mark=2
+ *               sw1p2         ifindex=3       mark=2
+ *               sw2p1         ifindex=4       mark=4
+ *             sw2p2           ifindex=5       mark=5
+ */
+void switchdev_port_fwd_mark_set(struct net_device *dev,
+                                struct net_device *group_dev,
+                                bool joining)
+{
+       u32 mark = dev->ifindex;
+       u32 reset_mark = 0;
+
+       if (group_dev && joining) {
+               mark = switchdev_port_fwd_mark_get(dev, group_dev);
+       } else if (group_dev && !joining) {
+               if (dev->offload_fwd_mark == mark)
+                       /* Ohoh, this port was the mark reference port,
+                        * but it's leaving the group, so reset the
+                        * mark for the remaining ports in the group.
+                        */
+                       switchdev_port_fwd_mark_reset(group_dev, mark,
+                                                     &reset_mark);
+       }
+
+       dev->offload_fwd_mark = mark;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_fwd_mark_set);
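
A userspace sketch of the grouping rule documented above: ports in the same group that share a parent switch ID get one common mark (which member's ifindex is chosen is arbitrary); everyone else keeps their own ifindex. All names here are hypothetical, not the kernel API.

#include <stdio.h>

struct port { int ifindex; int parent_id; int mark; };

static void group_marks(struct port *ports, int n)
{
        for (int i = 0; i < n; i++) {
                ports[i].mark = ports[i].ifindex;
                for (int j = 0; j < i; j++) {
                        if (ports[j].parent_id == ports[i].parent_id) {
                                ports[i].mark = ports[j].mark;
                                break;
                        }
                }
        }
}

int main(void)
{
        struct port p[] = {
                { 2, 1, 0 }, { 3, 1, 0 }, { 4, 2, 0 }, { 5, 2, 0 },
        };

        group_marks(p, 4);
        for (int i = 0; i < 4; i++)
                printf("ifindex=%d mark=%d\n", p[i].ifindex, p[i].mark);
        return 0;
}
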
index a816382fc8af1b9efb016f888493ca4dcc65fe3b..8b010c976b2f7c8eba5f6fe1cadb516b4e0f7269 100644 (file)
@@ -316,6 +316,29 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
        }
 }
 
+void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *hdr)
+{
+       u16 last = msg_last_bcast(hdr);
+       int mtyp = msg_type(hdr);
+
+       if (unlikely(msg_user(hdr) != LINK_PROTOCOL))
+               return;
+       if (mtyp == STATE_MSG) {
+               tipc_bclink_update_link_state(n, last);
+               return;
+       }
+       /* Compatibility: older nodes don't know BCAST_PROTOCOL synchronization,
+        * and transfer synch info in LINK_PROTOCOL messages.
+        */
+       if (tipc_node_is_up(n))
+               return;
+       if ((mtyp != RESET_MSG) && (mtyp != ACTIVATE_MSG))
+               return;
+       n->bclink.last_sent = last;
+       n->bclink.last_in = last;
+       n->bclink.oos_state = 0;
+}
+
 /**
  * bclink_peek_nack - monitor retransmission requests sent by other nodes
  *
@@ -358,10 +381,9 @@ int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
 
        /* Prepare clone of message for local node */
        skb = tipc_msg_reassemble(list);
-       if (unlikely(!skb)) {
-               __skb_queue_purge(list);
+       if (unlikely(!skb))
                return -EHOSTUNREACH;
-       }
+
        /* Broadcast to all nodes */
        if (likely(bclink)) {
                tipc_bclink_lock(net);
@@ -413,7 +435,7 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
         * all nodes in the cluster don't ACK at the same time
         */
        if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
-               tipc_link_proto_xmit(node->active_links[node->addr & 1],
+               tipc_link_proto_xmit(node_active_link(node, node->addr),
                                     STATE_MSG, 0, 0, 0, 0);
                tn->bcl->stats.sent_acks++;
        }
@@ -925,7 +947,6 @@ int tipc_bclink_init(struct net *net)
        tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
        bcl->bearer_id = MAX_BEARERS;
        rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
-       bcl->state = WORKING_WORKING;
        bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
        msg_set_prevnode(bcl->pmsg, tn->own_addr);
        strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
index 3c290a48f72037ece5eddfb55c55d501e7f61e67..d74c69bcf60bda5e04ddc61a988afd6fa94bee66 100644 (file)
@@ -133,5 +133,6 @@ void tipc_bclink_wakeup_users(struct net *net);
 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
 int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]);
 void tipc_bclink_input(struct net *net);
+void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *msg);
 
 #endif
index 00bc0e6205326025212a85e1110bab48c208e73a..ce9f7bfc0b92444950f51893e87abbc426151eb6 100644 (file)
@@ -343,7 +343,7 @@ restart:
 static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr)
 {
        pr_info("Resetting bearer <%s>\n", b_ptr->name);
-       tipc_link_delete_list(net, b_ptr->identity);
+       tipc_node_delete_links(net, b_ptr->identity);
        tipc_disc_reset(net, b_ptr);
        return 0;
 }
@@ -361,7 +361,7 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr)
        pr_info("Disabling bearer <%s>\n", b_ptr->name);
        b_ptr->media->disable_media(b_ptr);
 
-       tipc_link_delete_list(net, b_ptr->identity);
+       tipc_node_delete_links(net, b_ptr->identity);
        if (b_ptr->link_req)
                tipc_disc_delete(b_ptr->link_req);
 
@@ -470,6 +470,32 @@ void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
        rcu_read_unlock();
 }
 
+/* tipc_bearer_xmit() - send buffer to destination over bearer
+ */
+void tipc_bearer_xmit(struct net *net, u32 bearer_id,
+                     struct sk_buff_head *xmitq,
+                     struct tipc_media_addr *dst)
+{
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_bearer *b;
+       struct sk_buff *skb, *tmp;
+
+       if (skb_queue_empty(xmitq))
+               return;
+
+       rcu_read_lock();
+       b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
+       if (likely(b)) {
+               skb_queue_walk_safe(xmitq, skb, tmp) {
+                       __skb_dequeue(xmitq);
+                       b->media->send_msg(net, skb, b, dst);
+                       /* Until we remove cloning in tipc_l2_send_msg(): */
+                       kfree_skb(skb);
+               }
+       }
+       rcu_read_unlock();
+}
+
 /**
  * tipc_l2_rcv_msg - handle incoming TIPC message from an interface
  * @buf: the received packet
index dc714d977768c105cff0b774b49be1e5ec1c59fd..6426f242f6262e80594cd1cdc438c4a94f4c7026 100644 (file)
@@ -217,5 +217,8 @@ void tipc_bearer_cleanup(void);
 void tipc_bearer_stop(struct net *net);
 void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
                      struct tipc_media_addr *dest);
+void tipc_bearer_xmit(struct net *net, u32 bearer_id,
+                     struct sk_buff_head *xmitq,
+                     struct tipc_media_addr *dst);
 
 #endif /* _TIPC_BEARER_H */
index 0fcf133d5cb7cef0f33478412cb75809b68a8223..b96b41eabf121cc8577b65d0ad5bb0727ca5d3f7 100644 (file)
@@ -109,6 +109,11 @@ struct tipc_net {
        atomic_t subscription_count;
 };
 
+static inline struct tipc_net *tipc_net(struct net *net)
+{
+       return net_generic(net, tipc_net_id);
+}
+
 static inline u16 mod(u16 x)
 {
        return x & 0xffffu;
@@ -129,6 +134,11 @@ static inline int less(u16 left, u16 right)
        return less_eq(left, right) && (mod(right) != mod(left));
 }
 
+static inline int in_range(u16 val, u16 min, u16 max)
+{
+       return !less(val, min) && !more(val, max);
+}
+
 #ifdef CONFIG_SYSCTL
 int tipc_register_sysctl(void);
 void tipc_unregister_sysctl(void);
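
The in_range()/less()/mod() helpers above implement serial-number arithmetic on 16-bit sequence numbers. A standalone sketch showing that the comparison stays correct across the 0xffff -> 0 wrap:

#include <stdint.h>
#include <stdio.h>

/* less_eq(left, right) holds when 'right' is at most half the sequence
 * space ahead of 'left', mod 2^16 -- same arithmetic as the TIPC helpers. */
static int less_eq(uint16_t left, uint16_t right)
{
        return (uint16_t)(right - left) < 32768u;
}

static int in_range(uint16_t val, uint16_t min, uint16_t max)
{
        return less_eq(min, val) && less_eq(val, max);
}

int main(void)
{
        printf("%d\n", in_range(2, 65530, 10));     /* 1: inside the wrapped window */
        printf("%d\n", in_range(20000, 65530, 10)); /* 0: outside it */
        return 0;
}
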
index 967e292f53c89182bc0ed128b1dacd51fe02d090..d14e0a4aa9af900a7ace6855ab91eb2bcc901641 100644 (file)
@@ -35,7 +35,7 @@
  */
 
 #include "core.h"
-#include "link.h"
+#include "node.h"
 #include "discover.h"
 
 /* min delay during bearer start up */
@@ -120,30 +120,24 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
  * @buf: buffer containing message
  * @bearer: bearer that message arrived on
  */
-void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
+void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
                   struct tipc_bearer *bearer)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct tipc_node *node;
-       struct tipc_link *link;
        struct tipc_media_addr maddr;
-       struct sk_buff *rbuf;
-       struct tipc_msg *msg = buf_msg(buf);
-       u32 ddom = msg_dest_domain(msg);
-       u32 onode = msg_prevnode(msg);
-       u32 net_id = msg_bc_netid(msg);
-       u32 mtyp = msg_type(msg);
-       u32 signature = msg_node_sig(msg);
-       u16 caps = msg_node_capabilities(msg);
-       bool addr_match = false;
-       bool sign_match = false;
-       bool link_up = false;
-       bool accept_addr = false;
-       bool accept_sign = false;
+       struct sk_buff *rskb;
+       struct tipc_msg *hdr = buf_msg(skb);
+       u32 ddom = msg_dest_domain(hdr);
+       u32 onode = msg_prevnode(hdr);
+       u32 net_id = msg_bc_netid(hdr);
+       u32 mtyp = msg_type(hdr);
+       u32 signature = msg_node_sig(hdr);
+       u16 caps = msg_node_capabilities(hdr);
        bool respond = false;
+       bool dupl_addr = false;
 
-       bearer->media->msg2addr(bearer, &maddr, msg_media_addr(msg));
-       kfree_skb(buf);
+       bearer->media->msg2addr(bearer, &maddr, msg_media_addr(hdr));
+       kfree_skb(skb);
 
        /* Ensure message from node is valid and communication is permitted */
        if (net_id != tn->net_id)
@@ -165,102 +159,20 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
        if (!tipc_in_scope(bearer->domain, onode))
                return;
 
-       node = tipc_node_create(net, onode);
-       if (!node)
-               return;
-       tipc_node_lock(node);
-       node->capabilities = caps;
-       link = node->links[bearer->identity];
-
-       /* Prepare to validate requesting node's signature and media address */
-       sign_match = (signature == node->signature);
-       addr_match = link && !memcmp(&link->media_addr, &maddr, sizeof(maddr));
-       link_up = link && tipc_link_is_up(link);
-
-
-       /* These three flags give us eight permutations: */
-
-       if (sign_match && addr_match && link_up) {
-               /* All is fine. Do nothing. */
-       } else if (sign_match && addr_match && !link_up) {
-               /* Respond. The link will come up in due time */
-               respond = true;
-       } else if (sign_match && !addr_match && link_up) {
-               /* Peer has changed i/f address without rebooting.
-                * If so, the link will reset soon, and the next
-                * discovery will be accepted. So we can ignore it.
-                * It may also be a cloned or malicious peer having
-                * chosen the same node address and signature as an
-                * existing one.
-                * Ignore requests until the link goes down, if ever.
-                */
-               disc_dupl_alert(bearer, onode, &maddr);
-       } else if (sign_match && !addr_match && !link_up) {
-               /* Peer link has changed i/f address without rebooting.
-                * It may also be a cloned or malicious peer; we can't
-                * distinguish between the two.
-                * The signature is correct, so we must accept.
-                */
-               accept_addr = true;
-               respond = true;
-       } else if (!sign_match && addr_match && link_up) {
-               /* Peer node rebooted. Two possibilities:
-                *  - Delayed re-discovery; this link endpoint has already
-                *    reset and re-established contact with the peer, before
-                *    receiving a discovery message from that node.
-                *    (The peer happened to receive one from this node first).
-                *  - The peer came back so fast that our side has not
-                *    discovered it yet. Probing from this side will soon
-                *    reset the link, since there can be no working link
-                *    endpoint at the peer end, and the link will re-establish.
-                *  Accept the signature, since it comes from a known peer.
-                */
-               accept_sign = true;
-       } else if (!sign_match && addr_match && !link_up) {
-               /*  The peer node has rebooted.
-                *  Accept signature, since it is a known peer.
-                */
-               accept_sign = true;
-               respond = true;
-       } else if (!sign_match && !addr_match && link_up) {
-               /* Peer rebooted with new address, or a new/duplicate peer.
-                * Ignore until the link goes down, if ever.
-                */
+       tipc_node_check_dest(net, onode, bearer, caps, signature,
+                            &maddr, &respond, &dupl_addr);
+       if (dupl_addr)
                disc_dupl_alert(bearer, onode, &maddr);
-       } else if (!sign_match && !addr_match && !link_up) {
-               /* Peer rebooted with new address, or it is a new peer.
-                * Accept signature and address.
-               */
-               accept_sign = true;
-               accept_addr = true;
-               respond = true;
-       }
-
-       if (accept_sign)
-               node->signature = signature;
-
-       if (accept_addr) {
-               if (!link)
-                       link = tipc_link_create(node, bearer, &maddr);
-               if (link) {
-                       memcpy(&link->media_addr, &maddr, sizeof(maddr));
-                       tipc_link_reset(link);
-               } else {
-                       respond = false;
-               }
-       }
 
        /* Send response, if necessary */
        if (respond && (mtyp == DSC_REQ_MSG)) {
-               rbuf = tipc_buf_acquire(MAX_H_SIZE);
-               if (rbuf) {
-                       tipc_disc_init_msg(net, rbuf, DSC_RESP_MSG, bearer);
-                       tipc_bearer_send(net, bearer->identity, rbuf, &maddr);
-                       kfree_skb(rbuf);
+               rskb = tipc_buf_acquire(MAX_H_SIZE);
+               if (rskb) {
+                       tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer);
+                       tipc_bearer_send(net, bearer->identity, rskb, &maddr);
+                       kfree_skb(rskb);
                }
        }
-       tipc_node_unlock(node);
-       tipc_node_put(node);
 }
 
 /**
index eaa9fe54b4aebfb531610611637915dc1b0c7256..f067e5425560fe0d43c184589a397d614c12573b 100644 (file)
@@ -48,9 +48,8 @@
 /*
  * Error message prefixes
  */
-static const char *link_co_err = "Link changeover error, ";
+static const char *link_co_err = "Link tunneling error, ";
 static const char *link_rst_msg = "Resetting link ";
-static const char *link_unk_evt = "Unknown link event ";
 
 static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
        [TIPC_NLA_LINK_UNSPEC]          = { .type = NLA_UNSPEC },
@@ -76,257 +75,414 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
        [TIPC_NLA_PROP_WIN]             = { .type = NLA_U32 }
 };
 
+/*
+ * Interval between NACKs when packets arrive out of order
+ */
+#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
 /*
  * Out-of-range value for link session numbers
  */
-#define INVALID_SESSION 0x10000
+#define WILDCARD_SESSION 0x10000
 
-/*
- * Link state events:
+/* Link FSM states:
  */
-#define  STARTING_EVT    856384768     /* link processing trigger */
-#define  TRAFFIC_MSG_EVT 560815u       /* rx'd ??? */
-#define  SILENCE_EVT     560817u       /* timer discovered silence from peer */
+enum {
+       LINK_ESTABLISHED     = 0xe,
+       LINK_ESTABLISHING    = 0xe  << 4,
+       LINK_RESET           = 0x1  << 8,
+       LINK_RESETTING       = 0x2  << 12,
+       LINK_PEER_RESET      = 0xd  << 16,
+       LINK_FAILINGOVER     = 0xf  << 20,
+       LINK_SYNCHING        = 0xc  << 24
+};
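Each state above occupies its own nibble at a distinct shift, so membership in any set of states can be tested with a single bitwise AND, as link_is_up() and tipc_link_is_blocked() below do. A minimal standalone sketch of the encoding trick (hypothetical state names, not the TIPC ones):

	enum { ST_DOWN = 0x1, ST_UP = 0x2 << 4, ST_SYNCING = 0x4 << 8 };

	/* Non-zero iff 'state' is a member of the set 'mask' */
	static int in_state_set(int state, int mask)
	{
		return state & mask;
	}

	/* usage: if (in_state_set(state, ST_UP | ST_SYNCING)) ... */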
 
-/*
- * State value stored in 'failover_pkts'
+/* Link FSM state checking routines
  */
-#define FIRST_FAILOVER 0xffffu
-
-static void link_handle_out_of_seq_msg(struct tipc_link *link,
-                                      struct sk_buff *skb);
-static void tipc_link_proto_rcv(struct tipc_link *link,
-                               struct sk_buff *skb);
-static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
-static void link_state_event(struct tipc_link *l_ptr, u32 event);
+static int link_is_up(struct tipc_link *l)
+{
+       return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
+}
+
+static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
+                              struct sk_buff_head *xmitq);
+static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
+                                     u16 rcvgap, int tolerance, int priority,
+                                     struct sk_buff_head *xmitq);
 static void link_reset_statistics(struct tipc_link *l_ptr);
 static void link_print(struct tipc_link *l_ptr, const char *str);
-static void tipc_link_sync_xmit(struct tipc_link *l);
 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
-static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
-static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
-static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
-static void link_set_timer(struct tipc_link *link, unsigned long time);
+
 /*
- *  Simple link routines
+ *  Simple non-static link routines (i.e. referenced outside this file)
  */
-static unsigned int align(unsigned int i)
+bool tipc_link_is_up(struct tipc_link *l)
 {
-       return (i + 3) & ~3u;
+       return link_is_up(l);
 }
 
-static void tipc_link_release(struct kref *kref)
+bool tipc_link_is_reset(struct tipc_link *l)
 {
-       kfree(container_of(kref, struct tipc_link, ref));
+       return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
 }
 
-static void tipc_link_get(struct tipc_link *l_ptr)
+bool tipc_link_is_synching(struct tipc_link *l)
 {
-       kref_get(&l_ptr->ref);
+       return l->state == LINK_SYNCHING;
 }
 
-static void tipc_link_put(struct tipc_link *l_ptr)
+bool tipc_link_is_failingover(struct tipc_link *l)
 {
-       kref_put(&l_ptr->ref, tipc_link_release);
+       return l->state == LINK_FAILINGOVER;
 }
 
-static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
+bool tipc_link_is_blocked(struct tipc_link *l)
 {
-       if (l->owner->active_links[0] != l)
-               return l->owner->active_links[0];
-       return l->owner->active_links[1];
+       return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
 }
 
-/*
- *  Simple non-static link routines (i.e. referenced outside this file)
- */
-int tipc_link_is_up(struct tipc_link *l_ptr)
+int tipc_link_is_active(struct tipc_link *l)
 {
-       if (!l_ptr)
-               return 0;
-       return link_working_working(l_ptr) || link_working_unknown(l_ptr);
+       struct tipc_node *n = l->owner;
+
+       return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l);
 }
 
-int tipc_link_is_active(struct tipc_link *l_ptr)
+static u32 link_own_addr(struct tipc_link *l)
 {
-       return  (l_ptr->owner->active_links[0] == l_ptr) ||
-               (l_ptr->owner->active_links[1] == l_ptr);
+       return msg_prevnode(l->pmsg);
 }
 
 /**
- * link_timeout - handle expiration of link timer
- * @l_ptr: pointer to link
+ * tipc_link_create - create a new link
+ * @n: pointer to associated node
+ * @b: pointer to associated bearer
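+ * @session: session id to be used by this link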
+ * @ownnode: identity of own node
+ * @peer: identity of peer node
+ * @maddr: media address to be used
+ * @inputq: queue to put messages ready for delivery
+ * @namedq: queue to put binding table update messages ready for delivery
+ * @link: pointer used to return the newly created link
+ *
+ * Returns true if link was created, otherwise false
  */
-static void link_timeout(unsigned long data)
+bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
+                     u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
+                     struct sk_buff_head *inputq, struct sk_buff_head *namedq,
+                     struct tipc_link **link)
 {
-       struct tipc_link *l_ptr = (struct tipc_link *)data;
-       struct sk_buff *skb;
+       struct tipc_link *l;
+       struct tipc_msg *hdr;
+       char *if_name;
+
+       l = kzalloc(sizeof(*l), GFP_ATOMIC);
+       if (!l)
+               return false;
+       *link = l;
+
+       /* Note: peer i/f name is completed by reset/activate message */
+       if_name = strchr(b->name, ':') + 1;
+       sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
+               tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
+               if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
+
+       l->addr = peer;
+       l->media_addr = maddr;
+       l->owner = n;
+       l->peer_session = WILDCARD_SESSION;
+       l->bearer_id = b->identity;
+       l->tolerance = b->tolerance;
+       l->net_plane = b->net_plane;
+       l->advertised_mtu = b->mtu;
+       l->mtu = b->mtu;
+       l->priority = b->priority;
+       tipc_link_set_queue_limits(l, b->window);
+       l->inputq = inputq;
+       l->namedq = namedq;
+       l->state = LINK_RESETTING;
+       l->pmsg = (struct tipc_msg *)&l->proto_msg;
+       hdr = l->pmsg;
+       tipc_msg_init(ownnode, hdr, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, peer);
+       msg_set_size(hdr, sizeof(l->proto_msg));
+       msg_set_session(hdr, session);
+       msg_set_bearer_id(hdr, l->bearer_id);
+       strcpy((char *)msg_data(hdr), if_name);
+       __skb_queue_head_init(&l->transmq);
+       __skb_queue_head_init(&l->backlogq);
+       __skb_queue_head_init(&l->deferdq);
+       skb_queue_head_init(&l->wakeupq);
+       skb_queue_head_init(l->inputq);
+       return true;
+}
 
-       tipc_node_lock(l_ptr->owner);
+/* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints.
+ *
+ * Give a newly added peer node the sequence number where it should
+ * start receiving and acking broadcast packets.
+ */
+void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
+                                   struct sk_buff_head *xmitq)
+{
+       struct sk_buff *skb;
+       struct sk_buff_head list;
+       u16 last_sent;
 
-       /* update counters used in statistical profiling of send traffic */
-       l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->transmq);
-       l_ptr->stats.queue_sz_counts++;
+       skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
+                             0, l->addr, link_own_addr(l), 0, 0, 0);
+       if (!skb)
+               return;
+       last_sent = tipc_bclink_get_last_sent(l->owner->net);
+       msg_set_last_bcast(buf_msg(skb), last_sent);
+       __skb_queue_head_init(&list);
+       __skb_queue_tail(&list, skb);
+       tipc_link_xmit(l, &list, xmitq);
+}
 
-       skb = skb_peek(&l_ptr->transmq);
-       if (skb) {
-               struct tipc_msg *msg = buf_msg(skb);
-               u32 length = msg_size(msg);
+/**
+ * tipc_link_fsm_evt - link finite state machine
+ * @l: pointer to link
+ * @evt: state machine event to be processed
+ */
+int tipc_link_fsm_evt(struct tipc_link *l, int evt)
+{
+       int rc = 0;
 
-               if ((msg_user(msg) == MSG_FRAGMENTER) &&
-                   (msg_type(msg) == FIRST_FRAGMENT)) {
-                       length = msg_size(msg_get_wrapped(msg));
+       switch (l->state) {
+       case LINK_RESETTING:
+               switch (evt) {
+               case LINK_PEER_RESET_EVT:
+                       l->state = LINK_PEER_RESET;
+                       break;
+               case LINK_RESET_EVT:
+                       l->state = LINK_RESET;
+                       break;
+               case LINK_FAILURE_EVT:
+               case LINK_FAILOVER_BEGIN_EVT:
+               case LINK_ESTABLISH_EVT:
+               case LINK_FAILOVER_END_EVT:
+               case LINK_SYNCH_BEGIN_EVT:
+               case LINK_SYNCH_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case LINK_RESET:
+               switch (evt) {
+               case LINK_PEER_RESET_EVT:
+                       l->state = LINK_ESTABLISHING;
+                       break;
+               case LINK_FAILOVER_BEGIN_EVT:
+                       l->state = LINK_FAILINGOVER;
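+                       /* fall through: the events below are no-ops here */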
+               case LINK_FAILURE_EVT:
+               case LINK_RESET_EVT:
+               case LINK_ESTABLISH_EVT:
+               case LINK_FAILOVER_END_EVT:
+                       break;
+               case LINK_SYNCH_BEGIN_EVT:
+               case LINK_SYNCH_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case LINK_PEER_RESET:
+               switch (evt) {
+               case LINK_RESET_EVT:
+                       l->state = LINK_ESTABLISHING;
+                       break;
+               case LINK_PEER_RESET_EVT:
+               case LINK_ESTABLISH_EVT:
+               case LINK_FAILURE_EVT:
+                       break;
+               case LINK_SYNCH_BEGIN_EVT:
+               case LINK_SYNCH_END_EVT:
+               case LINK_FAILOVER_BEGIN_EVT:
+               case LINK_FAILOVER_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case LINK_FAILINGOVER:
+               switch (evt) {
+               case LINK_FAILOVER_END_EVT:
+                       l->state = LINK_RESET;
+                       break;
+               case LINK_PEER_RESET_EVT:
+               case LINK_RESET_EVT:
+               case LINK_ESTABLISH_EVT:
+               case LINK_FAILURE_EVT:
+                       break;
+               case LINK_FAILOVER_BEGIN_EVT:
+               case LINK_SYNCH_BEGIN_EVT:
+               case LINK_SYNCH_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case LINK_ESTABLISHING:
+               switch (evt) {
+               case LINK_ESTABLISH_EVT:
+                       l->state = LINK_ESTABLISHED;
+                       rc |= TIPC_LINK_UP_EVT;
+                       break;
+               case LINK_FAILOVER_BEGIN_EVT:
+                       l->state = LINK_FAILINGOVER;
+                       break;
+               case LINK_PEER_RESET_EVT:
+               case LINK_RESET_EVT:
+               case LINK_FAILURE_EVT:
+               case LINK_SYNCH_BEGIN_EVT:
+               case LINK_FAILOVER_END_EVT:
+                       break;
+               case LINK_SYNCH_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case LINK_ESTABLISHED:
+               switch (evt) {
+               case LINK_PEER_RESET_EVT:
+                       l->state = LINK_PEER_RESET;
+                       rc |= TIPC_LINK_DOWN_EVT;
+                       break;
+               case LINK_FAILURE_EVT:
+                       l->state = LINK_RESETTING;
+                       rc |= TIPC_LINK_DOWN_EVT;
+                       break;
+               case LINK_RESET_EVT:
+                       l->state = LINK_RESET;
+                       break;
+               case LINK_ESTABLISH_EVT:
+                       break;
+               case LINK_SYNCH_BEGIN_EVT:
+                       l->state = LINK_SYNCHING;
+                       break;
+               case LINK_SYNCH_END_EVT:
+               case LINK_FAILOVER_BEGIN_EVT:
+               case LINK_FAILOVER_END_EVT:
+               default:
+                       goto illegal_evt;
                }
-               if (length) {
-                       l_ptr->stats.msg_lengths_total += length;
-                       l_ptr->stats.msg_length_counts++;
-                       if (length <= 64)
-                               l_ptr->stats.msg_length_profile[0]++;
-                       else if (length <= 256)
-                               l_ptr->stats.msg_length_profile[1]++;
-                       else if (length <= 1024)
-                               l_ptr->stats.msg_length_profile[2]++;
-                       else if (length <= 4096)
-                               l_ptr->stats.msg_length_profile[3]++;
-                       else if (length <= 16384)
-                               l_ptr->stats.msg_length_profile[4]++;
-                       else if (length <= 32768)
-                               l_ptr->stats.msg_length_profile[5]++;
-                       else
-                               l_ptr->stats.msg_length_profile[6]++;
+               break;
+       case LINK_SYNCHING:
+               switch (evt) {
+               case LINK_PEER_RESET_EVT:
+                       l->state = LINK_PEER_RESET;
+                       rc |= TIPC_LINK_DOWN_EVT;
+                       break;
+               case LINK_FAILURE_EVT:
+                       l->state = LINK_RESETTING;
+                       rc |= TIPC_LINK_DOWN_EVT;
+                       break;
+               case LINK_RESET_EVT:
+                       l->state = LINK_RESET;
+                       break;
+               case LINK_ESTABLISH_EVT:
+               case LINK_SYNCH_BEGIN_EVT:
+                       break;
+               case LINK_SYNCH_END_EVT:
+                       l->state = LINK_ESTABLISHED;
+                       break;
+               case LINK_FAILOVER_BEGIN_EVT:
+               case LINK_FAILOVER_END_EVT:
+               default:
+                       goto illegal_evt;
                }
+               break;
+       default:
+               pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
        }
-
-       /* do all other link processing performed on a periodic basis */
-       if (l_ptr->silent_intv_cnt || tipc_bclink_acks_missing(l_ptr->owner))
-               link_state_event(l_ptr, SILENCE_EVT);
-       l_ptr->silent_intv_cnt++;
-       if (skb_queue_len(&l_ptr->backlogq))
-               tipc_link_push_packets(l_ptr);
-       link_set_timer(l_ptr, l_ptr->keepalive_intv);
-       tipc_node_unlock(l_ptr->owner);
-       tipc_link_put(l_ptr);
-}
-
-static void link_set_timer(struct tipc_link *link, unsigned long time)
-{
-       if (!mod_timer(&link->timer, jiffies + time))
-               tipc_link_get(link);
+       return rc;
+illegal_evt:
+       pr_err("Illegal FSM event %x in state %x on link %s\n",
+              evt, l->state, l->name);
+       return rc;
 }
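For orientation, the nominal bring-up path through the FSM above, as a sketch only; in the running system these events are injected by the node layer and by arriving protocol messages:

	/* Link starts in LINK_RESETTING (set by tipc_link_create()) */
	tipc_link_fsm_evt(l, LINK_RESET_EVT);          /* -> LINK_RESET */
	tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);     /* -> LINK_ESTABLISHING */
	rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT); /* -> LINK_ESTABLISHED */
	/* rc now carries TIPC_LINK_UP_EVT for the caller to act on */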
 
-/**
- * tipc_link_create - create a new link
- * @n_ptr: pointer to associated node
- * @b_ptr: pointer to associated bearer
- * @media_addr: media address to use when sending messages over link
- *
- * Returns pointer to link.
+/* link_profile_stats - update statistical profiling of traffic
  */
-struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
-                                  struct tipc_bearer *b_ptr,
-                                  const struct tipc_media_addr *media_addr)
+static void link_profile_stats(struct tipc_link *l)
 {
-       struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
-       struct tipc_link *l_ptr;
+       struct sk_buff *skb;
        struct tipc_msg *msg;
-       char *if_name;
-       char addr_string[16];
-       u32 peer = n_ptr->addr;
+       int length;
 
-       if (n_ptr->link_cnt >= MAX_BEARERS) {
-               tipc_addr_string_fill(addr_string, n_ptr->addr);
-               pr_err("Cannot establish %uth link to %s. Max %u allowed.\n",
-                      n_ptr->link_cnt, addr_string, MAX_BEARERS);
-               return NULL;
-       }
+       /* Update counters used in statistical profiling of send traffic */
+       l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
+       l->stats.queue_sz_counts++;
 
-       if (n_ptr->links[b_ptr->identity]) {
-               tipc_addr_string_fill(addr_string, n_ptr->addr);
-               pr_err("Attempt to establish second link on <%s> to %s\n",
-                      b_ptr->name, addr_string);
-               return NULL;
-       }
+       skb = skb_peek(&l->transmq);
+       if (!skb)
+               return;
+       msg = buf_msg(skb);
+       length = msg_size(msg);
 
-       l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
-       if (!l_ptr) {
-               pr_warn("Link creation failed, no memory\n");
-               return NULL;
+       if (msg_user(msg) == MSG_FRAGMENTER) {
+               if (msg_type(msg) != FIRST_FRAGMENT)
+                       return;
+               length = msg_size(msg_get_wrapped(msg));
        }
-       kref_init(&l_ptr->ref);
-       l_ptr->addr = peer;
-       if_name = strchr(b_ptr->name, ':') + 1;
-       sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
-               tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
-               tipc_node(tn->own_addr),
-               if_name,
-               tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
-               /* note: peer i/f name is updated by reset/activate message */
-       memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
-       l_ptr->owner = n_ptr;
-       l_ptr->peer_session = INVALID_SESSION;
-       l_ptr->bearer_id = b_ptr->identity;
-       link_set_supervision_props(l_ptr, b_ptr->tolerance);
-       l_ptr->state = RESET_UNKNOWN;
-
-       l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
-       msg = l_ptr->pmsg;
-       tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
-                     l_ptr->addr);
-       msg_set_size(msg, sizeof(l_ptr->proto_msg));
-       msg_set_session(msg, (tn->random & 0xffff));
-       msg_set_bearer_id(msg, b_ptr->identity);
-       strcpy((char *)msg_data(msg), if_name);
-       l_ptr->net_plane = b_ptr->net_plane;
-       l_ptr->advertised_mtu = b_ptr->mtu;
-       l_ptr->mtu = l_ptr->advertised_mtu;
-       l_ptr->priority = b_ptr->priority;
-       tipc_link_set_queue_limits(l_ptr, b_ptr->window);
-       l_ptr->snd_nxt = 1;
-       __skb_queue_head_init(&l_ptr->transmq);
-       __skb_queue_head_init(&l_ptr->backlogq);
-       __skb_queue_head_init(&l_ptr->deferdq);
-       skb_queue_head_init(&l_ptr->wakeupq);
-       skb_queue_head_init(&l_ptr->inputq);
-       skb_queue_head_init(&l_ptr->namedq);
-       link_reset_statistics(l_ptr);
-       tipc_node_attach_link(n_ptr, l_ptr);
-       setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
-       link_state_event(l_ptr, STARTING_EVT);
-
-       return l_ptr;
+       l->stats.msg_lengths_total += length;
+       l->stats.msg_length_counts++;
+       if (length <= 64)
+               l->stats.msg_length_profile[0]++;
+       else if (length <= 256)
+               l->stats.msg_length_profile[1]++;
+       else if (length <= 1024)
+               l->stats.msg_length_profile[2]++;
+       else if (length <= 4096)
+               l->stats.msg_length_profile[3]++;
+       else if (length <= 16384)
+               l->stats.msg_length_profile[4]++;
+       else if (length <= 32768)
+               l->stats.msg_length_profile[5]++;
+       else
+               l->stats.msg_length_profile[6]++;
 }
 
-/**
- * tipc_link_delete - Delete a link
- * @l: link to be deleted
+/* tipc_link_timeout - perform periodic task as instructed by the node timeout
  */
-void tipc_link_delete(struct tipc_link *l)
+int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
 {
-       tipc_link_reset(l);
-       if (del_timer(&l->timer))
-               tipc_link_put(l);
-       l->flags |= LINK_STOPPED;
-       /* Delete link now, or when timer is finished: */
-       tipc_link_reset_fragments(l);
-       tipc_node_detach_link(l->owner, l);
-       tipc_link_put(l);
-}
+       int rc = 0;
+       int mtyp = STATE_MSG;
+       bool xmit = false;
+       bool prb = false;
+
+       link_profile_stats(l);
+
+       switch (l->state) {
+       case LINK_ESTABLISHED:
+       case LINK_SYNCHING:
+               if (!l->silent_intv_cnt) {
+                       if (tipc_bclink_acks_missing(l->owner))
+                               xmit = true;
+               } else if (l->silent_intv_cnt <= l->abort_limit) {
+                       xmit = true;
+                       prb = true;
+               } else {
+                       rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+               }
+               l->silent_intv_cnt++;
+               break;
+       case LINK_RESET:
+               xmit = true;
+               mtyp = RESET_MSG;
+               break;
+       case LINK_ESTABLISHING:
+               xmit = true;
+               mtyp = ACTIVATE_MSG;
+               break;
+       case LINK_PEER_RESET:
+       case LINK_RESETTING:
+       case LINK_FAILINGOVER:
+               break;
+       default:
+               break;
+       }
 
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
-{
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct tipc_link *link;
-       struct tipc_node *node;
+       if (xmit)
+               tipc_link_build_proto_msg(l, mtyp, prb, 0, 0, 0, xmitq);
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(node, &tn->node_list, list) {
-               tipc_node_lock(node);
-               link = node->links[bearer_id];
-               if (link)
-                       tipc_link_delete(link);
-               tipc_node_unlock(node);
-       }
-       rcu_read_unlock();
+       return rc;
 }
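Note the pattern that recurs throughout this rewrite: the link layer never transmits directly, it appends outgoing packets to a caller-supplied xmitq and reports state changes through the return code. A hedged sketch of the expected caller (the real driver of this function is the node-level timer; 'net' and the locking are assumed to be handled by that caller):

	struct sk_buff_head xmitq;
	struct sk_buff *skb;
	int rc;

	__skb_queue_head_init(&xmitq);
	rc = tipc_link_timeout(l, &xmitq);
	if (rc & TIPC_LINK_DOWN_EVT)
		; /* node-level reaction, e.g. initiate reset/failover */
	while ((skb = __skb_dequeue(&xmitq)))
		tipc_bearer_send(net, l->bearer_id, skb, l->media_addr);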
 
 /**
@@ -334,7 +490,7 @@ void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
  * @link: congested link
  * @list: message that was attempted sent
  * Create pseudo msg to send back to user when congestion abates
- * Only consumes message if there is an error
+ * Does not consume buffer list
  */
 static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
 {
@@ -347,8 +503,7 @@ static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
        /* This really cannot happen...  */
        if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
                pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
-               tipc_link_reset(link);
-               goto err;
+               return -ENOBUFS;
        }
        /* Non-blocking sender: */
        if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
@@ -358,15 +513,12 @@ static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
        skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
                              addr, addr, oport, 0, 0);
        if (!skb)
-               goto err;
+               return -ENOBUFS;
        TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
        TIPC_SKB_CB(skb)->chain_imp = imp;
        skb_queue_tail(&link->wakeupq, skb);
        link->stats.link_congs++;
        return -ELINKCONG;
-err:
-       __skb_queue_purge(list);
-       return -ENOBUFS;
 }
 
 /**
@@ -388,9 +540,7 @@ void link_prepare_wakeup(struct tipc_link *l)
                if ((pnd[imp] + l->backlog[imp].len) >= lim)
                        break;
                skb_unlink(skb, &l->wakeupq);
-               skb_queue_tail(&l->inputq, skb);
-               l->owner->inputq = &l->inputq;
-               l->owner->action_flags |= TIPC_MSG_EVT;
+               skb_queue_tail(l->inputq, skb);
        }
 }
 
@@ -426,208 +576,36 @@ void tipc_link_purge_queues(struct tipc_link *l_ptr)
        tipc_link_reset_fragments(l_ptr);
 }
 
-void tipc_link_reset(struct tipc_link *l_ptr)
+void tipc_link_reset(struct tipc_link *l)
 {
-       u32 prev_state = l_ptr->state;
-       int was_active_link = tipc_link_is_active(l_ptr);
-       struct tipc_node *owner = l_ptr->owner;
-       struct tipc_link *pl = tipc_parallel_link(l_ptr);
-
-       msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
+       tipc_link_fsm_evt(l, LINK_RESET_EVT);
 
        /* Link is down, accept any session */
-       l_ptr->peer_session = INVALID_SESSION;
-
-       /* Prepare for renewed mtu size negotiation */
-       l_ptr->mtu = l_ptr->advertised_mtu;
-
-       l_ptr->state = RESET_UNKNOWN;
-
-       if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
-               return;
-
-       tipc_node_link_down(l_ptr->owner, l_ptr);
-       tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);
-
-       if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
-               l_ptr->flags |= LINK_FAILINGOVER;
-               l_ptr->failover_checkpt = l_ptr->rcv_nxt;
-               pl->failover_pkts = FIRST_FAILOVER;
-               pl->failover_checkpt = l_ptr->rcv_nxt;
-               pl->failover_skb = l_ptr->reasm_buf;
-       } else {
-               kfree_skb(l_ptr->reasm_buf);
-       }
-       /* Clean up all queues, except inputq: */
-       __skb_queue_purge(&l_ptr->transmq);
-       __skb_queue_purge(&l_ptr->deferdq);
-       if (!owner->inputq)
-               owner->inputq = &l_ptr->inputq;
-       skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
-       if (!skb_queue_empty(owner->inputq))
-               owner->action_flags |= TIPC_MSG_EVT;
-       tipc_link_purge_backlog(l_ptr);
-       l_ptr->reasm_buf = NULL;
-       l_ptr->rcv_unacked = 0;
-       l_ptr->snd_nxt = 1;
-       l_ptr->silent_intv_cnt = 0;
-       l_ptr->stale_count = 0;
-       link_reset_statistics(l_ptr);
-}
-
-static void link_activate(struct tipc_link *link)
-{
-       struct tipc_node *node = link->owner;
-
-       link->rcv_nxt = 1;
-       link->stats.recv_info = 1;
-       link->silent_intv_cnt = 0;
-       tipc_node_link_up(node, link);
-       tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
-}
-
-/**
- * link_state_event - link finite state machine
- * @l_ptr: pointer to link
- * @event: state machine event to process
- */
-static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
-{
-       struct tipc_link *other;
-       unsigned long timer_intv = l_ptr->keepalive_intv;
+       l->peer_session = WILDCARD_SESSION;
 
-       if (l_ptr->flags & LINK_STOPPED)
-               return;
-
-       if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
-               return;         /* Not yet. */
-
-       if (l_ptr->flags & LINK_FAILINGOVER)
-               return;
+       /* If peer is up, it only accepts an incremented session number */
+       msg_set_session(l->pmsg, msg_session(l->pmsg) + 1);
 
-       switch (l_ptr->state) {
-       case WORKING_WORKING:
-               switch (event) {
-               case TRAFFIC_MSG_EVT:
-               case ACTIVATE_MSG:
-                       l_ptr->silent_intv_cnt = 0;
-                       break;
-               case SILENCE_EVT:
-                       if (!l_ptr->silent_intv_cnt) {
-                               if (tipc_bclink_acks_missing(l_ptr->owner))
-                                       tipc_link_proto_xmit(l_ptr, STATE_MSG,
-                                                            0, 0, 0, 0);
-                               break;
-                       }
-                       l_ptr->state = WORKING_UNKNOWN;
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
-                       break;
-               case RESET_MSG:
-                       pr_debug("%s<%s>, requested by peer\n",
-                                link_rst_msg, l_ptr->name);
-                       tipc_link_reset(l_ptr);
-                       l_ptr->state = RESET_RESET;
-                       tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-                                            0, 0, 0, 0);
-                       break;
-               default:
-                       pr_debug("%s%u in WW state\n", link_unk_evt, event);
-               }
-               break;
-       case WORKING_UNKNOWN:
-               switch (event) {
-               case TRAFFIC_MSG_EVT:
-               case ACTIVATE_MSG:
-                       l_ptr->state = WORKING_WORKING;
-                       l_ptr->silent_intv_cnt = 0;
-                       break;
-               case RESET_MSG:
-                       pr_debug("%s<%s>, requested by peer while probing\n",
-                                link_rst_msg, l_ptr->name);
-                       tipc_link_reset(l_ptr);
-                       l_ptr->state = RESET_RESET;
-                       tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-                                            0, 0, 0, 0);
-                       break;
-               case SILENCE_EVT:
-                       if (!l_ptr->silent_intv_cnt) {
-                               l_ptr->state = WORKING_WORKING;
-                               if (tipc_bclink_acks_missing(l_ptr->owner))
-                                       tipc_link_proto_xmit(l_ptr, STATE_MSG,
-                                                            0, 0, 0, 0);
-                       } else if (l_ptr->silent_intv_cnt <
-                                  l_ptr->abort_limit) {
-                               tipc_link_proto_xmit(l_ptr, STATE_MSG,
-                                                    1, 0, 0, 0);
-                       } else {        /* Link has failed */
-                               pr_debug("%s<%s>, peer not responding\n",
-                                        link_rst_msg, l_ptr->name);
-                               tipc_link_reset(l_ptr);
-                               l_ptr->state = RESET_UNKNOWN;
-                               tipc_link_proto_xmit(l_ptr, RESET_MSG,
-                                                    0, 0, 0, 0);
-                       }
-                       break;
-               default:
-                       pr_err("%s%u in WU state\n", link_unk_evt, event);
-               }
-               break;
-       case RESET_UNKNOWN:
-               switch (event) {
-               case TRAFFIC_MSG_EVT:
-                       break;
-               case ACTIVATE_MSG:
-                       other = l_ptr->owner->active_links[0];
-                       if (other && link_working_unknown(other))
-                               break;
-                       l_ptr->state = WORKING_WORKING;
-                       link_activate(l_ptr);
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
-                       if (l_ptr->owner->working_links == 1)
-                               tipc_link_sync_xmit(l_ptr);
-                       break;
-               case RESET_MSG:
-                       l_ptr->state = RESET_RESET;
-                       tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-                                            1, 0, 0, 0);
-                       break;
-               case STARTING_EVT:
-                       l_ptr->flags |= LINK_STARTED;
-                       link_set_timer(l_ptr, timer_intv);
-                       break;
-               case SILENCE_EVT:
-                       tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0);
-                       break;
-               default:
-                       pr_err("%s%u in RU state\n", link_unk_evt, event);
-               }
-               break;
-       case RESET_RESET:
-               switch (event) {
-               case TRAFFIC_MSG_EVT:
-               case ACTIVATE_MSG:
-                       other = l_ptr->owner->active_links[0];
-                       if (other && link_working_unknown(other))
-                               break;
-                       l_ptr->state = WORKING_WORKING;
-                       link_activate(l_ptr);
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
-                       if (l_ptr->owner->working_links == 1)
-                               tipc_link_sync_xmit(l_ptr);
-                       break;
-               case RESET_MSG:
-                       break;
-               case SILENCE_EVT:
-                       tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-                                            0, 0, 0, 0);
-                       break;
-               default:
-                       pr_err("%s%u in RR state\n", link_unk_evt, event);
-               }
-               break;
-       default:
-               pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
-       }
+       /* Prepare for renewed mtu size negotiation */
+       l->mtu = l->advertised_mtu;
+
+       /* Clean up all queues: */
+       __skb_queue_purge(&l->transmq);
+       __skb_queue_purge(&l->deferdq);
+       skb_queue_splice_init(&l->wakeupq, l->inputq);
+
+       tipc_link_purge_backlog(l);
+       kfree_skb(l->reasm_buf);
+       kfree_skb(l->failover_reasm_skb);
+       l->reasm_buf = NULL;
+       l->failover_reasm_skb = NULL;
+       l->rcv_unacked = 0;
+       l->snd_nxt = 1;
+       l->rcv_nxt = 1;
+       l->silent_intv_cnt = 0;
+       l->stats.recv_info = 0;
+       l->stale_count = 0;
+       link_reset_statistics(l);
 }
 
 /**
@@ -635,8 +613,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
  * @link: link to use
  * @list: chain of buffers containing message
  *
- * Consumes the buffer chain, except when returning -ELINKCONG,
- * since the caller then may want to make more send attempts.
+ * Consumes the buffer chain, except when returning an error code.
  * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
  * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
  */
@@ -650,7 +627,7 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
        u16 ack = mod(link->rcv_nxt - 1);
        u16 seqno = link->snd_nxt;
        u16 bc_last_in = link->owner->bclink.last_in;
-       struct tipc_media_addr *addr = &link->media_addr;
+       struct tipc_media_addr *addr = link->media_addr;
        struct sk_buff_head *transmq = &link->transmq;
        struct sk_buff_head *backlogq = &link->backlogq;
        struct sk_buff *skb, *bskb;
@@ -660,10 +637,9 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
                if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
                        return link_schedule_user(link, list);
        }
-       if (unlikely(msg_size(msg) > mtu)) {
-               __skb_queue_purge(list);
+       if (unlikely(msg_size(msg) > mtu))
                return -EMSGSIZE;
-       }
+
        /* Prepare each packet for sending, and add to relevant queue: */
        while (skb_queue_len(list)) {
                skb = skb_peek(list);
@@ -700,101 +676,76 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
        return 0;
 }
 
-static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
-{
-       skb_queue_head_init(list);
-       __skb_queue_tail(list, skb);
-}
-
-static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
-{
-       struct sk_buff_head head;
-
-       skb2list(skb, &head);
-       return __tipc_link_xmit(link->owner->net, link, &head);
-}
-
-/* tipc_link_xmit_skb(): send single buffer to destination
- * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
- * messages, which will not be rejected
- * The only exception is datagram messages rerouted after secondary
- * lookup, which are rare and safe to dispose of anyway.
- * TODO: Return real return value, and let callers use
- * tipc_wait_for_sendpkt() where applicable
- */
-int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
-                      u32 selector)
-{
-       struct sk_buff_head head;
-       int rc;
-
-       skb2list(skb, &head);
-       rc = tipc_link_xmit(net, &head, dnode, selector);
-       if (rc == -ELINKCONG)
-               kfree_skb(skb);
-       return 0;
-}
-
 /**
- * tipc_link_xmit() is the general link level function for message sending
- * @net: the applicable net namespace
+ * tipc_link_xmit(): enqueue buffer list according to the queue situation
+ * @link: link to use
  * @list: chain of buffers containing message
- * @dsz: amount of user data to be sent
- * @dnode: address of destination node
- * @selector: a number used for deterministic link selection
- * Consumes the buffer chain, except when returning -ELINKCONG
- * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
+ * @xmitq: returned list of packets to be sent by caller
+ *
+ * Consumes the buffer chain, except when returning -ELINKCONG,
+ * since the caller then may want to make more send attempts.
+ * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
+ * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
  */
-int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
-                  u32 selector)
+int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
+                  struct sk_buff_head *xmitq)
 {
-       struct tipc_link *link = NULL;
-       struct tipc_node *node;
-       int rc = -EHOSTUNREACH;
+       struct tipc_msg *hdr = buf_msg(skb_peek(list));
+       unsigned int maxwin = l->window;
+       unsigned int i, imp = msg_importance(hdr);
+       unsigned int mtu = l->mtu;
+       u16 ack = l->rcv_nxt - 1;
+       u16 seqno = l->snd_nxt;
+       u16 bc_last_in = l->owner->bclink.last_in;
+       struct sk_buff_head *transmq = &l->transmq;
+       struct sk_buff_head *backlogq = &l->backlogq;
+       struct sk_buff *skb, *_skb, *bskb;
 
-       node = tipc_node_find(net, dnode);
-       if (node) {
-               tipc_node_lock(node);
-               link = node->active_links[selector & 1];
-               if (link)
-                       rc = __tipc_link_xmit(net, link, list);
-               tipc_node_unlock(node);
-               tipc_node_put(node);
-       }
-       if (link)
-               return rc;
-
-       if (likely(in_own_node(net, dnode))) {
-               tipc_sk_rcv(net, list);
-               return 0;
+       /* Match msg importance against this and all higher backlog limits: */
+       for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
+               if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
+                       return link_schedule_user(l, list);
        }
+       if (unlikely(msg_size(hdr) > mtu))
+               return -EMSGSIZE;
 
-       __skb_queue_purge(list);
-       return rc;
-}
-
-/*
- * tipc_link_sync_xmit - synchronize broadcast link endpoints.
- *
- * Give a newly added peer node the sequence number where it should
- * start receiving and acking broadcast packets.
- *
- * Called with node locked
- */
-static void tipc_link_sync_xmit(struct tipc_link *link)
-{
-       struct sk_buff *skb;
-       struct tipc_msg *msg;
-
-       skb = tipc_buf_acquire(INT_H_SIZE);
-       if (!skb)
-               return;
+       /* Prepare each packet for sending, and add to relevant queue: */
+       while (skb_queue_len(list)) {
+               skb = skb_peek(list);
+               hdr = buf_msg(skb);
+               msg_set_seqno(hdr, seqno);
+               msg_set_ack(hdr, ack);
+               msg_set_bcast_ack(hdr, bc_last_in);
 
-       msg = buf_msg(skb);
-       tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG,
-                     INT_H_SIZE, link->addr);
-       msg_set_last_bcast(msg, link->owner->bclink.acked);
-       __tipc_link_xmit_skb(link, skb);
+               if (likely(skb_queue_len(transmq) < maxwin)) {
+                       _skb = skb_clone(skb, GFP_ATOMIC);
+                       if (!_skb)
+                               return -ENOBUFS;
+                       __skb_dequeue(list);
+                       __skb_queue_tail(transmq, skb);
+                       __skb_queue_tail(xmitq, _skb);
+                       l->rcv_unacked = 0;
+                       seqno++;
+                       continue;
+               }
+               if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
+                       kfree_skb(__skb_dequeue(list));
+                       l->stats.sent_bundled++;
+                       continue;
+               }
+               if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
+                       kfree_skb(__skb_dequeue(list));
+                       __skb_queue_tail(backlogq, bskb);
+                       l->backlog[msg_importance(buf_msg(bskb))].len++;
+                       l->stats.sent_bundled++;
+                       l->stats.sent_bundles++;
+                       continue;
+               }
+               l->backlog[imp].len += skb_queue_len(list);
+               skb_queue_splice_tail_init(list, backlogq);
+       }
+       l->snd_nxt = seqno;
+       return 0;
 }
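Worth noting in the loop above: the original buffer stays on transmq for possible retransmission while a clone is queued on xmitq for the wire; skb_clone() shares the data area, so the copy is cheap. The ownership pattern in isolation (a sketch, not the full function):

	struct sk_buff *_skb = skb_clone(skb, GFP_ATOMIC);

	if (!_skb)
		return -ENOBUFS;           /* leave 'list' intact for the caller */
	__skb_dequeue(list);               /* take ownership of the original */
	__skb_queue_tail(transmq, skb);    /* kept for retransmission */
	__skb_queue_tail(xmitq, _skb);     /* handed to the bearer */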
 
 /*
@@ -842,29 +793,37 @@ void tipc_link_push_packets(struct tipc_link *link)
                link->rcv_unacked = 0;
                __skb_queue_tail(&link->transmq, skb);
                tipc_bearer_send(link->owner->net, link->bearer_id,
-                                skb, &link->media_addr);
+                                skb, link->media_addr);
        }
        link->snd_nxt = seqno;
 }
 
-void tipc_link_reset_all(struct tipc_node *node)
+void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
 {
-       char addr_string[16];
-       u32 i;
+       struct sk_buff *skb, *_skb;
+       struct tipc_msg *hdr;
+       u16 seqno = l->snd_nxt;
+       u16 ack = l->rcv_nxt - 1;
 
-       tipc_node_lock(node);
-
-       pr_warn("Resetting all links to %s\n",
-               tipc_addr_string_fill(addr_string, node->addr));
-
-       for (i = 0; i < MAX_BEARERS; i++) {
-               if (node->links[i]) {
-                       link_print(node->links[i], "Resetting link\n");
-                       tipc_link_reset(node->links[i]);
-               }
+       while (skb_queue_len(&l->transmq) < l->window) {
+               skb = skb_peek(&l->backlogq);
+               if (!skb)
+                       break;
+               _skb = skb_clone(skb, GFP_ATOMIC);
+               if (!_skb)
+                       break;
+               __skb_dequeue(&l->backlogq);
+               hdr = buf_msg(skb);
+               l->backlog[msg_importance(hdr)].len--;
+               __skb_queue_tail(&l->transmq, skb);
+               __skb_queue_tail(xmitq, _skb);
+               msg_set_ack(hdr, ack);
+               msg_set_seqno(hdr, seqno);
+               msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+               l->rcv_unacked = 0;
+               seqno++;
        }
-
-       tipc_node_unlock(node);
+       l->snd_nxt = seqno;
 }
 
 static void link_retransmit_failure(struct tipc_link *l_ptr,
@@ -877,9 +836,12 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
 
        if (l_ptr->addr) {
                /* Handle failure on standard link */
-               link_print(l_ptr, "Resetting link\n");
-               tipc_link_reset(l_ptr);
-
+               link_print(l_ptr, "Resetting link ");
+               pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
+                       msg_user(msg), msg_type(msg), msg_size(msg),
+                       msg_errcode(msg));
+               pr_info("sqno %u, prev: %x, src: %x\n",
+                       msg_seqno(msg), msg_prevnode(msg), msg_orignode(msg));
        } else {
                /* Handle failure on broadcast link */
                struct tipc_node *n_ptr;
@@ -934,191 +896,45 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
                msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
                msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
                tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
-                                &l_ptr->media_addr);
+                                l_ptr->media_addr);
                retransmits--;
                l_ptr->stats.retransmitted++;
        }
 }
 
-/* link_synch(): check if all packets arrived before the synch
- *               point have been consumed
- * Returns true if the parallel links are synched, otherwise false
- */
-static bool link_synch(struct tipc_link *l)
-{
-       unsigned int post_synch;
-       struct tipc_link *pl;
-
-       pl  = tipc_parallel_link(l);
-       if (pl == l)
-               goto synched;
-
-       /* Was last pre-synch packet added to input queue ? */
-       if (less_eq(pl->rcv_nxt, l->synch_point))
-               return false;
-
-       /* Is it still in the input queue ? */
-       post_synch = mod(pl->rcv_nxt - l->synch_point) - 1;
-       if (skb_queue_len(&pl->inputq) > post_synch)
-               return false;
-synched:
-       l->flags &= ~LINK_SYNCHING;
-       return true;
-}
-
-static void link_retrieve_defq(struct tipc_link *link,
-                              struct sk_buff_head *list)
+static int tipc_link_retransm(struct tipc_link *l, int retransm,
+                             struct sk_buff_head *xmitq)
 {
-       u16 seq_no;
-
-       if (skb_queue_empty(&link->deferdq))
-               return;
-
-       seq_no = buf_seqno(skb_peek(&link->deferdq));
-       if (seq_no == link->rcv_nxt)
-               skb_queue_splice_tail_init(&link->deferdq, list);
-}
-
-/**
- * tipc_rcv - process TIPC packets/messages arriving from off-node
- * @net: the applicable net namespace
- * @skb: TIPC packet
- * @b_ptr: pointer to bearer message arrived on
- *
- * Invoked with no locks held.  Bearer pointer must point to a valid bearer
- * structure (i.e. cannot be NULL), but bearer can be inactive.
- */
-void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
-{
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct sk_buff_head head;
-       struct tipc_node *n_ptr;
-       struct tipc_link *l_ptr;
-       struct sk_buff *skb1, *tmp;
-       struct tipc_msg *msg;
-       u16 seq_no;
-       u16 ackd;
-       u32 released;
+       struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
+       struct tipc_msg *hdr;
 
-       skb2list(skb, &head);
-
-       while ((skb = __skb_dequeue(&head))) {
-               /* Ensure message is well-formed */
-               if (unlikely(!tipc_msg_validate(skb)))
-                       goto discard;
-
-               /* Handle arrival of a non-unicast link message */
-               msg = buf_msg(skb);
-               if (unlikely(msg_non_seq(msg))) {
-                       if (msg_user(msg) ==  LINK_CONFIG)
-                               tipc_disc_rcv(net, skb, b_ptr);
-                       else
-                               tipc_bclink_rcv(net, skb);
-                       continue;
-               }
-
-               /* Discard unicast link messages destined for another node */
-               if (unlikely(!msg_short(msg) &&
-                            (msg_destnode(msg) != tn->own_addr)))
-                       goto discard;
-
-               /* Locate neighboring node that sent message */
-               n_ptr = tipc_node_find(net, msg_prevnode(msg));
-               if (unlikely(!n_ptr))
-                       goto discard;
-
-               tipc_node_lock(n_ptr);
-               /* Locate unicast link endpoint that should handle message */
-               l_ptr = n_ptr->links[b_ptr->identity];
-               if (unlikely(!l_ptr))
-                       goto unlock;
-
-               /* Verify that communication with node is currently allowed */
-               if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
-                   msg_user(msg) == LINK_PROTOCOL &&
-                   (msg_type(msg) == RESET_MSG ||
-                   msg_type(msg) == ACTIVATE_MSG) &&
-                   !msg_redundant_link(msg))
-                       n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;
-
-               if (tipc_node_blocked(n_ptr))
-                       goto unlock;
-
-               /* Validate message sequence number info */
-               seq_no = msg_seqno(msg);
-               ackd = msg_ack(msg);
-
-               /* Release acked messages */
-               if (unlikely(n_ptr->bclink.acked != msg_bcast_ack(msg)))
-                       tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
-
-               released = 0;
-               skb_queue_walk_safe(&l_ptr->transmq, skb1, tmp) {
-                       if (more(buf_seqno(skb1), ackd))
-                               break;
-                        __skb_unlink(skb1, &l_ptr->transmq);
-                        kfree_skb(skb1);
-                        released = 1;
-               }
-
-               /* Try sending any messages link endpoint has pending */
-               if (unlikely(skb_queue_len(&l_ptr->backlogq)))
-                       tipc_link_push_packets(l_ptr);
-
-               if (released && !skb_queue_empty(&l_ptr->wakeupq))
-                       link_prepare_wakeup(l_ptr);
-
-               /* Process the incoming packet */
-               if (unlikely(!link_working_working(l_ptr))) {
-                       if (msg_user(msg) == LINK_PROTOCOL) {
-                               tipc_link_proto_rcv(l_ptr, skb);
-                               link_retrieve_defq(l_ptr, &head);
-                               skb = NULL;
-                               goto unlock;
-                       }
-
-                       /* Traffic message. Conditionally activate link */
-                       link_state_event(l_ptr, TRAFFIC_MSG_EVT);
-
-                       if (link_working_working(l_ptr)) {
-                               /* Re-insert buffer in front of queue */
-                               __skb_queue_head(&head, skb);
-                               skb = NULL;
-                               goto unlock;
-                       }
-                       goto unlock;
-               }
-
-               /* Link is now in state WORKING_WORKING */
-               if (unlikely(seq_no != l_ptr->rcv_nxt)) {
-                       link_handle_out_of_seq_msg(l_ptr, skb);
-                       link_retrieve_defq(l_ptr, &head);
-                       skb = NULL;
-                       goto unlock;
-               }
-               l_ptr->silent_intv_cnt = 0;
+       if (!skb)
+               return 0;
 
-               /* Synchronize with parallel link if applicable */
-               if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
-                       if (!link_synch(l_ptr))
-                               goto unlock;
-               }
-               l_ptr->rcv_nxt++;
-               if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
-                       link_retrieve_defq(l_ptr, &head);
-               if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
-                       l_ptr->stats.sent_acks++;
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
-               }
-               tipc_link_input(l_ptr, skb);
-               skb = NULL;
-unlock:
-               tipc_node_unlock(n_ptr);
-               tipc_node_put(n_ptr);
-discard:
-               if (unlikely(skb))
-                       kfree_skb(skb);
+       /* Detect repeated retransmit failures on same packet */
+       if (likely(l->last_retransm != buf_seqno(skb))) {
+               l->last_retransm = buf_seqno(skb);
+               l->stale_count = 1;
+       } else if (++l->stale_count > 100) {
+               link_retransmit_failure(l, skb);
+               return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+       }
+       skb_queue_walk(&l->transmq, skb) {
+               if (!retransm)
+                       return 0;
+               hdr = buf_msg(skb);
+               _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
+               if (!_skb)
+                       return 0;
+               hdr = buf_msg(_skb);
+               msg_set_ack(hdr, l->rcv_nxt - 1);
+               msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+               _skb->priority = TC_PRIO_CONTROL;
+               __skb_queue_tail(xmitq, _skb);
+               retransm--;
+               l->stats.retransmitted++;
        }
+       return 0;
 }
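The stale_count test above bounds futile retransmission: the counter restarts whenever a different packet reaches the head of transmq, and only the 101st consecutive retransmit of the same seqno is treated as link failure. Reduced to its essentials (hypothetical names):

	if (last_retransm != seqno) {      /* progress: a new head packet */
		last_retransm = seqno;
		stale_count = 1;
	} else if (++stale_count > 100) {  /* same packet, 100 retries */
		declare_link_failure();    /* hypothetical; here: LINK_FAILURE_EVT */
	}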
 
 /* tipc_data_input - deliver data and name distr msgs to upper layer
@@ -1126,29 +942,22 @@ discard:
  * Consumes buffer if message is of right type
  * Node lock must be held
  */
-static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
+static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb,
+                           struct sk_buff_head *inputq)
 {
        struct tipc_node *node = link->owner;
-       struct tipc_msg *msg = buf_msg(skb);
-       u32 dport = msg_destport(msg);
 
-       switch (msg_user(msg)) {
+       switch (msg_user(buf_msg(skb))) {
        case TIPC_LOW_IMPORTANCE:
        case TIPC_MEDIUM_IMPORTANCE:
        case TIPC_HIGH_IMPORTANCE:
        case TIPC_CRITICAL_IMPORTANCE:
        case CONN_MANAGER:
-               if (tipc_skb_queue_tail(&link->inputq, skb, dport)) {
-                       node->inputq = &link->inputq;
-                       node->action_flags |= TIPC_MSG_EVT;
-               }
+               __skb_queue_tail(inputq, skb);
                return true;
        case NAME_DISTRIBUTOR:
                node->bclink.recv_permitted = true;
-               node->namedq = &link->namedq;
-               skb_queue_tail(&link->namedq, skb);
-               if (skb_queue_len(&link->namedq) == 1)
-                       node->action_flags |= TIPC_NAMED_MSG_EVT;
+               skb_queue_tail(link->namedq, skb);
                return true;
        case MSG_BUNDLER:
        case TUNNEL_PROTOCOL:
@@ -1165,54 +974,160 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
 /* tipc_link_input - process packet that has passed link protocol check
  *
  * Consumes buffer
- * Node lock must be held
  */
-static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
+static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
+                          struct sk_buff_head *inputq)
 {
-       struct tipc_node *node = link->owner;
-       struct tipc_msg *msg = buf_msg(skb);
+       struct tipc_node *node = l->owner;
+       struct tipc_msg *hdr = buf_msg(skb);
+       struct sk_buff **reasm_skb = &l->reasm_buf;
        struct sk_buff *iskb;
+       int usr = msg_user(hdr);
+       int rc = 0;
        int pos = 0;
+       int ipos = 0;
 
-       if (likely(tipc_data_input(link, skb)))
-               return;
+       if (unlikely(usr == TUNNEL_PROTOCOL)) {
+               if (msg_type(hdr) == SYNCH_MSG) {
+                       __skb_queue_purge(&l->deferdq);
+                       goto drop;
+               }
+               if (!tipc_msg_extract(skb, &iskb, &ipos))
+                       return rc;
+               kfree_skb(skb);
+               skb = iskb;
+               hdr = buf_msg(skb);
+               if (less(msg_seqno(hdr), l->drop_point))
+                       goto drop;
+               if (tipc_data_input(l, skb, inputq))
+                       return rc;
+               usr = msg_user(hdr);
+               reasm_skb = &l->failover_reasm_skb;
+       }
 
-       switch (msg_user(msg)) {
-       case TUNNEL_PROTOCOL:
-               if (msg_dup(msg)) {
-                       link->flags |= LINK_SYNCHING;
-                       link->synch_point = msg_seqno(msg_get_wrapped(msg));
-                       kfree_skb(skb);
-                       break;
+       if (usr == MSG_BUNDLER) {
+               l->stats.recv_bundles++;
+               l->stats.recv_bundled += msg_msgcnt(hdr);
+               while (tipc_msg_extract(skb, &iskb, &pos))
+                       tipc_data_input(l, iskb, inputq);
+               return 0;
+       } else if (usr == MSG_FRAGMENTER) {
+               l->stats.recv_fragments++;
+               if (tipc_buf_append(reasm_skb, &skb)) {
+                       l->stats.recv_fragmented++;
+                       tipc_data_input(l, skb, inputq);
+               } else if (!*reasm_skb) {
+                       return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                }
-               if (!tipc_link_failover_rcv(link, &skb))
-                       break;
-               if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
-                       tipc_data_input(link, skb);
+               return 0;
+       } else if (usr == BCAST_PROTOCOL) {
+               tipc_link_sync_rcv(node, skb);
+               return 0;
+       }
+drop:
+       kfree_skb(skb);
+       return 0;
+}
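
Two details in the TUNNEL_PROTOCOL branch above are easy to miss: a SYNCH_MSG merely announces that the peer has activated a parallel link, so the deferred queue is purged and the carrier itself is dropped, while a FAILOVER_MSG carries a wrapped original packet that must be extracted. Extracted packets with sequence numbers below l->drop_point were already delivered on the failed link and are discarded, and fragments arriving via failover are reassembled in the dedicated failover_reasm_skb buffer so that an in-progress reassembly on this link cannot be corrupted.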
+
+static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
+{
+       bool released = false;
+       struct sk_buff *skb, *tmp;
+
+       skb_queue_walk_safe(&l->transmq, skb, tmp) {
+               if (more(buf_seqno(skb), acked))
                        break;
+               __skb_unlink(skb, &l->transmq);
+               kfree_skb(skb);
+               released = true;
+       }
+       return released;
+}
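
tipc_link_release_pkts() can stop at the first sequence number beyond acked because transmq is kept in strict send order. The more()/less() comparisons it and the receive path rely on are wrap-safe over the 16-bit sequence space; the standalone model below illustrates the idea (TIPC's real helpers are built on a mod() macro in its headers, so treat this as an equivalent sketch, not the kernel's code):

    #include <assert.h>
    #include <stdint.h>

    /* Wrap-safe mod-2^16 ordering: a precedes b when the forward
     * distance from a to b is less than half the sequence space. */
    static int seq_less(uint16_t a, uint16_t b)
    {
            return a != b && (uint16_t)(b - a) < 0x8000;
    }

    static int seq_more(uint16_t a, uint16_t b)
    {
            return seq_less(b, a);
    }

    int main(void)
    {
            assert(seq_less(65530, 3));     /* precedes across the wrap */
            assert(seq_more(3, 65530));
            assert(!seq_less(7, 7));
            return 0;
    }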
+
+/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
+ * @l: the link that should handle the message
+ * @skb: TIPC packet
+ * @xmitq: queue to place packets to be sent after this call
+ */
+int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
+                 struct sk_buff_head *xmitq)
+{
+       struct sk_buff_head *arrvq = &l->deferdq;
+       struct sk_buff_head tmpq;
+       struct tipc_msg *hdr;
+       u16 seqno, rcv_nxt;
+       int rc = 0;
+
+       __skb_queue_head_init(&tmpq);
+
+       if (unlikely(!__tipc_skb_queue_sorted(arrvq, skb))) {
+               if (!(skb_queue_len(arrvq) % TIPC_NACK_INTV))
+                       tipc_link_build_proto_msg(l, STATE_MSG, 0,
+                                                 0, 0, 0, xmitq);
+               return rc;
+       }
+
+       while ((skb = skb_peek(arrvq))) {
+               hdr = buf_msg(skb);
+
+               /* Verify and update link state */
+               if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) {
+                       __skb_dequeue(arrvq);
+                       rc = tipc_link_proto_rcv(l, skb, xmitq);
+                       continue;
                }
-       case MSG_BUNDLER:
-               link->stats.recv_bundles++;
-               link->stats.recv_bundled += msg_msgcnt(msg);
 
-               while (tipc_msg_extract(skb, &iskb, &pos))
-                       tipc_data_input(link, iskb);
-               break;
-       case MSG_FRAGMENTER:
-               link->stats.recv_fragments++;
-               if (tipc_buf_append(&link->reasm_buf, &skb)) {
-                       link->stats.recv_fragmented++;
-                       tipc_data_input(link, skb);
-               } else if (!link->reasm_buf) {
-                       tipc_link_reset(link);
+               if (unlikely(!link_is_up(l))) {
+                       rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+                       if (!link_is_up(l)) {
+                               kfree_skb(__skb_dequeue(arrvq));
+                               goto exit;
+                       }
                }
-               break;
-       case BCAST_PROTOCOL:
-               tipc_link_sync_rcv(node, skb);
-               break;
-       default:
-               break;
-       };
+
+               l->silent_intv_cnt = 0;
+
+               /* Forward queues and wake up waiting users */
+               if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
+                       tipc_link_advance_backlog(l, xmitq);
+                       if (unlikely(!skb_queue_empty(&l->wakeupq)))
+                               link_prepare_wakeup(l);
+               }
+
+               /* Defer reception if there is a gap in the sequence */
+               seqno = msg_seqno(hdr);
+               rcv_nxt = l->rcv_nxt;
+               if (unlikely(less(rcv_nxt, seqno))) {
+                       l->stats.deferred_recv++;
+                       goto exit;
+               }
+
+               __skb_dequeue(arrvq);
+
+               /* Drop if packet already received */
+               if (unlikely(more(rcv_nxt, seqno))) {
+                       l->stats.duplicates++;
+                       kfree_skb(skb);
+                       goto exit;
+               }
+
+               /* Packet can be delivered */
+               l->rcv_nxt++;
+               l->stats.recv_info++;
+               if (unlikely(!tipc_data_input(l, skb, &tmpq)))
+                       rc = tipc_link_input(l, skb, &tmpq);
+
+               /* Ack at regular intervals */
+               if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
+                       l->rcv_unacked = 0;
+                       l->stats.sent_acks++;
+                       tipc_link_build_proto_msg(l, STATE_MSG,
+                                                 0, 0, 0, 0, xmitq);
+               }
+       }
+exit:
+       tipc_skb_queue_splice_tail(&tmpq, l->inputq);
+       return rc;
 }
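
Stripped of the protocol and link-state handling, the per-packet verdict in the loop above reduces to three cases against the expected sequence number. A userspace model of just that decision, reusing the wrap-safe comparison sketched earlier:

    #include <stdint.h>
    #include <stdio.h>

    enum rx_verdict { RX_DEFER, RX_DROP_DUP, RX_DELIVER };

    static int seq_less(uint16_t a, uint16_t b)
    {
            return a != b && (uint16_t)(b - a) < 0x8000;
    }

    /* Mirrors the gap/duplicate/deliver checks in tipc_link_rcv() */
    static enum rx_verdict classify(uint16_t rcv_nxt, uint16_t seqno)
    {
            if (seq_less(rcv_nxt, seqno))
                    return RX_DEFER;        /* gap: leave on deferdq */
            if (seq_less(seqno, rcv_nxt))
                    return RX_DROP_DUP;     /* already received */
            return RX_DELIVER;              /* in sequence: rcv_nxt++ */
    }

    int main(void)
    {
            /* prints "0 1 2": defer 12, drop 9, deliver 10 */
            printf("%d %d %d\n", classify(10, 12), classify(10, 9),
                   classify(10, 10));
            return 0;
    }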
 
 /**
@@ -1255,458 +1170,249 @@ u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
 }
 
 /*
- * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
+ * Send protocol message to the other endpoint.
  */
-static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
-                                      struct sk_buff *buf)
+void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
+                         u32 gap, u32 tolerance, u32 priority)
 {
-       u32 seq_no = buf_seqno(buf);
+       struct sk_buff *skb = NULL;
+       struct sk_buff_head xmitq;
 
-       if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
-               tipc_link_proto_rcv(l_ptr, buf);
+       __skb_queue_head_init(&xmitq);
+       tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
+                                 tolerance, priority, &xmitq);
+       skb = __skb_dequeue(&xmitq);
+       if (!skb)
                return;
-       }
-
-       /* Record OOS packet arrival */
-       l_ptr->silent_intv_cnt = 0;
+       tipc_bearer_send(l->owner->net, l->bearer_id, skb, l->media_addr);
+       l->rcv_unacked = 0;
+       kfree_skb(skb);
+}
 
-       /*
-        * Discard packet if a duplicate; otherwise add it to deferred queue
-        * and notify peer of gap as per protocol specification
-        */
-       if (less(seq_no, l_ptr->rcv_nxt)) {
-               l_ptr->stats.duplicates++;
-               kfree_skb(buf);
+/* tipc_link_build_proto_msg(): prepare link protocol message for transmission
+ */
+static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
+                                     u16 rcvgap, int tolerance, int priority,
+                                     struct sk_buff_head *xmitq)
+{
+       struct sk_buff *skb = NULL;
+       struct tipc_msg *hdr = l->pmsg;
+       u16 snd_nxt = l->snd_nxt;
+       u16 rcv_nxt = l->rcv_nxt;
+       u16 rcv_last = rcv_nxt - 1;
+       int node_up = l->owner->bclink.recv_permitted;
+
+       /* Don't send protocol message during reset or link failover */
+       if (tipc_link_is_blocked(l))
                return;
-       }
 
-       if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) {
-               l_ptr->stats.deferred_recv++;
-               if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1)
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
+       msg_set_type(hdr, mtyp);
+       msg_set_net_plane(hdr, l->net_plane);
+       msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+       msg_set_last_bcast(hdr, tipc_bclink_get_last_sent(l->owner->net));
+       msg_set_link_tolerance(hdr, tolerance);
+       msg_set_linkprio(hdr, priority);
+       msg_set_redundant_link(hdr, node_up);
+       msg_set_seq_gap(hdr, 0);
+
+       /* Compatibility: created msg must not be in sequence with pkt flow */
+       msg_set_seqno(hdr, snd_nxt + U16_MAX / 2);
+
+       if (mtyp == STATE_MSG) {
+               if (!tipc_link_is_up(l))
+                       return;
+               msg_set_next_sent(hdr, snd_nxt);
+
+               /* Override rcvgap if there are packets in deferred queue */
+               if (!skb_queue_empty(&l->deferdq))
+                       rcvgap = buf_seqno(skb_peek(&l->deferdq)) - rcv_nxt;
+               if (rcvgap) {
+                       msg_set_seq_gap(hdr, rcvgap);
+                       l->stats.sent_nacks++;
+               }
+               msg_set_ack(hdr, rcv_last);
+               msg_set_probe(hdr, probe);
+               if (probe)
+                       l->stats.sent_probes++;
+               l->stats.sent_states++;
        } else {
-               l_ptr->stats.duplicates++;
+               /* RESET_MSG or ACTIVATE_MSG */
+               msg_set_max_pkt(hdr, l->advertised_mtu);
+               msg_set_ack(hdr, l->rcv_nxt - 1);
+               msg_set_next_sent(hdr, 1);
        }
+       skb = tipc_buf_acquire(msg_size(hdr));
+       if (!skb)
+               return;
+       skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
+       skb->priority = TC_PRIO_CONTROL;
+       __skb_queue_tail(xmitq, skb);
 }
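
The U16_MAX / 2 offset is the whole compatibility trick here: with snd_nxt = 100 the protocol message is stamped with sequence number 100 + 32767 = 32867, the value maximally distant from the data flow in mod-65536 terms, which keeps it safely outside any plausible receive window on a legacy peer that still sequence-checks protocol messages.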
 
-/*
- * Send protocol message to the other endpoint.
+/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
+ * with contents of the link's transmit and backlog queues.
  */
-void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
-                         u32 gap, u32 tolerance, u32 priority)
+void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
+                          int mtyp, struct sk_buff_head *xmitq)
 {
-       struct sk_buff *buf = NULL;
-       struct tipc_msg *msg = l_ptr->pmsg;
-       u32 msg_size = sizeof(l_ptr->proto_msg);
-       int r_flag;
-       u16 last_rcv;
-
-       /* Don't send protocol message during link failover */
-       if (l_ptr->flags & LINK_FAILINGOVER)
-               return;
+       struct sk_buff *skb, *tnlskb;
+       struct tipc_msg *hdr, tnlhdr;
+       struct sk_buff_head *queue = &l->transmq;
+       struct sk_buff_head tmpxq, tnlq;
+       u16 pktlen, pktcnt, seqno = l->snd_nxt;
 
-       /* Abort non-RESET send if communication with node is prohibited */
-       if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
+       if (!tnl)
                return;
 
-       /* Create protocol message with "out-of-sequence" sequence number */
-       msg_set_type(msg, msg_typ);
-       msg_set_net_plane(msg, l_ptr->net_plane);
-       msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-       msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));
-
-       if (msg_typ == STATE_MSG) {
-               u16 next_sent = l_ptr->snd_nxt;
+       skb_queue_head_init(&tnlq);
+       skb_queue_head_init(&tmpxq);
 
-               if (!tipc_link_is_up(l_ptr))
+       /* At least one packet required for safe algorithm => add dummy */
+       skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
+                             BASIC_H_SIZE, 0, l->addr, link_own_addr(l),
+                             0, 0, TIPC_ERR_NO_PORT);
+       if (!skb) {
+               pr_warn("%sunable to create tunnel packet\n", link_co_err);
+               return;
+       }
+       skb_queue_tail(&tnlq, skb);
+       tipc_link_xmit(l, &tnlq, &tmpxq);
+       __skb_queue_purge(&tmpxq);
+
+       /* Initialize reusable tunnel packet header */
+       tipc_msg_init(link_own_addr(l), &tnlhdr, TUNNEL_PROTOCOL,
+                     mtyp, INT_H_SIZE, l->addr);
+       pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
+       msg_set_msgcnt(&tnlhdr, pktcnt);
+       msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
+tnl:
+       /* Wrap each packet into a tunnel packet */
+       skb_queue_walk(queue, skb) {
+               hdr = buf_msg(skb);
+               if (queue == &l->backlogq)
+                       msg_set_seqno(hdr, seqno++);
+               pktlen = msg_size(hdr);
+               msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
+               tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
+               if (!tnlskb) {
+                       pr_warn("%sunable to send packet\n", link_co_err);
                        return;
-               msg_set_next_sent(msg, next_sent);
-               if (!skb_queue_empty(&l_ptr->deferdq)) {
-                       last_rcv = buf_seqno(skb_peek(&l_ptr->deferdq));
-                       gap = mod(last_rcv - l_ptr->rcv_nxt);
                }
-               msg_set_seq_gap(msg, gap);
-               if (gap)
-                       l_ptr->stats.sent_nacks++;
-               msg_set_link_tolerance(msg, tolerance);
-               msg_set_linkprio(msg, priority);
-               msg_set_max_pkt(msg, l_ptr->mtu);
-               msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
-               msg_set_probe(msg, probe_msg != 0);
-               if (probe_msg)
-                       l_ptr->stats.sent_probes++;
-               l_ptr->stats.sent_states++;
-       } else {                /* RESET_MSG or ACTIVATE_MSG */
-               msg_set_ack(msg, mod(l_ptr->failover_checkpt - 1));
-               msg_set_seq_gap(msg, 0);
-               msg_set_next_sent(msg, 1);
-               msg_set_probe(msg, 0);
-               msg_set_link_tolerance(msg, l_ptr->tolerance);
-               msg_set_linkprio(msg, l_ptr->priority);
-               msg_set_max_pkt(msg, l_ptr->advertised_mtu);
+               skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
+               skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
+               __skb_queue_tail(&tnlq, tnlskb);
+       }
+       if (queue != &l->backlogq) {
+               queue = &l->backlogq;
+               goto tnl;
        }
 
-       r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
-       msg_set_redundant_link(msg, r_flag);
-       msg_set_linkprio(msg, l_ptr->priority);
-       msg_set_size(msg, msg_size);
-
-       msg_set_seqno(msg, mod(l_ptr->snd_nxt + (0xffff / 2)));
-
-       buf = tipc_buf_acquire(msg_size);
-       if (!buf)
-               return;
+       tipc_link_xmit(tnl, &tnlq, xmitq);
 
-       skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
-       buf->priority = TC_PRIO_CONTROL;
-       tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
-                        &l_ptr->media_addr);
-       l_ptr->rcv_unacked = 0;
-       kfree_skb(buf);
+       if (mtyp == FAILOVER_MSG) {
+               tnl->drop_point = l->rcv_nxt;
+               tnl->failover_reasm_skb = l->reasm_buf;
+               l->reasm_buf = NULL;
+       }
 }
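
A plausible call site for the function above, assuming node-level failover code that has just lost link l while the parallel link tnl is still usable, and assuming the batch bearer send helper introduced by this same rework:

    struct sk_buff_head xmitq;

    __skb_queue_head_init(&xmitq);
    tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, &xmitq);
    tipc_bearer_xmit(net, tnl->bearer_id, &xmitq, tnl->media_addr);

Note the FAILOVER_MSG side effect at the end of the function: the tunnel link inherits the failed link's reassembly buffer and records drop_point, which is exactly what tipc_link_input() later checks when unwrapping.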
 
-/*
- * Receive protocol message :
+/* tipc_link_proto_rcv(): receive link level protocol message:
  * Note that network plane id propagates through the network, and may
- * change at any time. The node with lowest address rules
+ * change at any time. The node with lowest numerical id determines
+ * network plane
  */
-static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
-                               struct sk_buff *buf)
+static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
+                              struct sk_buff_head *xmitq)
 {
-       u32 rec_gap = 0;
-       u32 msg_tol;
-       struct tipc_msg *msg = buf_msg(buf);
+       struct tipc_msg *hdr = buf_msg(skb);
+       u16 rcvgap = 0;
+       u16 nacked_gap = msg_seq_gap(hdr);
+       u16 peers_snd_nxt = msg_next_sent(hdr);
+       u16 peers_tol = msg_link_tolerance(hdr);
+       u16 peers_prio = msg_linkprio(hdr);
+       char *if_name;
+       int rc = 0;
 
-       if (l_ptr->flags & LINK_FAILINGOVER)
+       if (tipc_link_is_blocked(l))
                goto exit;
 
-       if (l_ptr->net_plane != msg_net_plane(msg))
-               if (link_own_addr(l_ptr) > msg_prevnode(msg))
-                       l_ptr->net_plane = msg_net_plane(msg);
-
-       switch (msg_type(msg)) {
+       if (link_own_addr(l) > msg_prevnode(hdr))
+               l->net_plane = msg_net_plane(hdr);
 
+       switch (msg_type(hdr)) {
        case RESET_MSG:
-               if (!link_working_unknown(l_ptr) &&
-                   (l_ptr->peer_session != INVALID_SESSION)) {
-                       if (less_eq(msg_session(msg), l_ptr->peer_session))
-                               break; /* duplicate or old reset: ignore */
-               }
-
-               if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
-                               link_working_unknown(l_ptr))) {
-                       /*
-                        * peer has lost contact -- don't allow peer's links
-                        * to reactivate before we recognize loss & clean up
-                        */
-                       l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
-               }
-
-               link_state_event(l_ptr, RESET_MSG);
 
+               /* Ignore duplicate RESET with old session number */
+               if ((less_eq(msg_session(hdr), l->peer_session)) &&
+                   (l->peer_session != WILDCARD_SESSION))
+                       break;
                /* fall thru' */
-       case ACTIVATE_MSG:
-               /* Update link settings according other endpoint's values */
-               strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
-
-               msg_tol = msg_link_tolerance(msg);
-               if (msg_tol > l_ptr->tolerance)
-                       link_set_supervision_props(l_ptr, msg_tol);
-
-               if (msg_linkprio(msg) > l_ptr->priority)
-                       l_ptr->priority = msg_linkprio(msg);
-
-               if (l_ptr->mtu > msg_max_pkt(msg))
-                       l_ptr->mtu = msg_max_pkt(msg);
 
-               /* Synchronize broadcast link info, if not done previously */
-               if (!tipc_node_is_up(l_ptr->owner)) {
-                       l_ptr->owner->bclink.last_sent =
-                               l_ptr->owner->bclink.last_in =
-                               msg_last_bcast(msg);
-                       l_ptr->owner->bclink.oos_state = 0;
-               }
-
-               l_ptr->peer_session = msg_session(msg);
-               l_ptr->peer_bearer_id = msg_bearer_id(msg);
-
-               if (msg_type(msg) == ACTIVATE_MSG)
-                       link_state_event(l_ptr, ACTIVATE_MSG);
-               break;
-       case STATE_MSG:
+       case ACTIVATE_MSG:
 
-               msg_tol = msg_link_tolerance(msg);
-               if (msg_tol)
-                       link_set_supervision_props(l_ptr, msg_tol);
-
-               if (msg_linkprio(msg) &&
-                   (msg_linkprio(msg) != l_ptr->priority)) {
-                       pr_debug("%s<%s>, priority change %u->%u\n",
-                                link_rst_msg, l_ptr->name,
-                                l_ptr->priority, msg_linkprio(msg));
-                       l_ptr->priority = msg_linkprio(msg);
-                       tipc_link_reset(l_ptr); /* Enforce change to take effect */
+               /* Complete own link name with peer's interface name */
+               if_name = strrchr(l->name, ':') + 1;
+               if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
                        break;
-               }
-
-               /* Record reception; force mismatch at next timeout: */
-               l_ptr->silent_intv_cnt = 0;
-
-               link_state_event(l_ptr, TRAFFIC_MSG_EVT);
-               l_ptr->stats.recv_states++;
-               if (link_reset_unknown(l_ptr))
+               if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
                        break;
+               strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME);
 
-               if (less_eq(l_ptr->rcv_nxt, msg_next_sent(msg)))
-                       rec_gap = mod(msg_next_sent(msg) - l_ptr->rcv_nxt);
-
-               if (msg_probe(msg))
-                       l_ptr->stats.recv_probes++;
+               /* Update own tolerance if peer indicates a non-zero value */
+               if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+                       l->tolerance = peers_tol;
 
-               /* Protocol message before retransmits, reduce loss risk */
-               if (l_ptr->owner->bclink.recv_permitted)
-                       tipc_bclink_update_link_state(l_ptr->owner,
-                                                     msg_last_bcast(msg));
+               /* Update own priority if peer's priority is higher */
+               if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
+                       l->priority = peers_prio;
 
-               if (rec_gap || (msg_probe(msg))) {
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0,
-                                            rec_gap, 0, 0);
-               }
-               if (msg_seq_gap(msg)) {
-                       l_ptr->stats.recv_nacks++;
-                       tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->transmq),
-                                            msg_seq_gap(msg));
+               if (msg_type(hdr) == RESET_MSG) {
+                       rc |= tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
+               } else if (!link_is_up(l)) {
+                       tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
+                       rc |= tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
                }
+               l->peer_session = msg_session(hdr);
+               l->peer_bearer_id = msg_bearer_id(hdr);
+               if (l->mtu > msg_max_pkt(hdr))
+                       l->mtu = msg_max_pkt(hdr);
                break;
-       }
-exit:
-       kfree_skb(buf);
-}
-
-
-/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
- * a different bearer. Owner node is locked.
- */
-static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
-                                 struct tipc_msg *tunnel_hdr,
-                                 struct tipc_msg *msg,
-                                 u32 selector)
-{
-       struct tipc_link *tunnel;
-       struct sk_buff *skb;
-       u32 length = msg_size(msg);
-
-       tunnel = l_ptr->owner->active_links[selector & 1];
-       if (!tipc_link_is_up(tunnel)) {
-               pr_warn("%stunnel link no longer available\n", link_co_err);
-               return;
-       }
-       msg_set_size(tunnel_hdr, length + INT_H_SIZE);
-       skb = tipc_buf_acquire(length + INT_H_SIZE);
-       if (!skb) {
-               pr_warn("%sunable to send tunnel msg\n", link_co_err);
-               return;
-       }
-       skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
-       skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
-       __tipc_link_xmit_skb(tunnel, skb);
-}
-
 
-/* tipc_link_failover_send_queue(): A link has gone down, but a second
- * link is still active. We can do failover. Tunnel the failing link's
- * whole send queue via the remaining link. This way, we don't lose
- * any packets, and sequence order is preserved for subsequent traffic
- * sent over the remaining link. Owner node is locked.
- */
-void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
-{
-       int msgcount;
-       struct tipc_link *tunnel = l_ptr->owner->active_links[0];
-       struct tipc_msg tunnel_hdr;
-       struct sk_buff *skb;
-       int split_bundles;
-
-       if (!tunnel)
-               return;
-
-       tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
-                     FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);
-
-       skb_queue_walk(&l_ptr->backlogq, skb) {
-               msg_set_seqno(buf_msg(skb), l_ptr->snd_nxt);
-               l_ptr->snd_nxt = mod(l_ptr->snd_nxt + 1);
-       }
-       skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
-       tipc_link_purge_backlog(l_ptr);
-       msgcount = skb_queue_len(&l_ptr->transmq);
-       msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
-       msg_set_msgcnt(&tunnel_hdr, msgcount);
-
-       if (skb_queue_empty(&l_ptr->transmq)) {
-               skb = tipc_buf_acquire(INT_H_SIZE);
-               if (skb) {
-                       skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
-                       msg_set_size(&tunnel_hdr, INT_H_SIZE);
-                       __tipc_link_xmit_skb(tunnel, skb);
-               } else {
-                       pr_warn("%sunable to send changeover msg\n",
-                               link_co_err);
-               }
-               return;
-       }
-
-       split_bundles = (l_ptr->owner->active_links[0] !=
-                        l_ptr->owner->active_links[1]);
-
-       skb_queue_walk(&l_ptr->transmq, skb) {
-               struct tipc_msg *msg = buf_msg(skb);
+       case STATE_MSG:
 
-               if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
-                       struct tipc_msg *m = msg_get_wrapped(msg);
-                       unchar *pos = (unchar *)m;
+               /* Update own tolerance if peer indicates a non-zero value */
+               if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+                       l->tolerance = peers_tol;
 
-                       msgcount = msg_msgcnt(msg);
-                       while (msgcount--) {
-                               msg_set_seqno(m, msg_seqno(msg));
-                               tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
-                                                     msg_link_selector(m));
-                               pos += align(msg_size(m));
-                               m = (struct tipc_msg *)pos;
-                       }
-               } else {
-                       tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
-                                             msg_link_selector(msg));
-               }
-       }
-}
-
-/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
- * duplicate of the first link's send queue via the new link. This way, we
- * are guaranteed that currently queued packets from a socket are delivered
- * before future traffic from the same socket, even if this is using the
- * new link. The last arriving copy of each duplicate packet is dropped at
- * the receiving end by the regular protocol check, so packet cardinality
- * and sequence order is preserved per sender/receiver socket pair.
- * Owner node is locked.
- */
-void tipc_link_dup_queue_xmit(struct tipc_link *link,
-                             struct tipc_link *tnl)
-{
-       struct sk_buff *skb;
-       struct tipc_msg tnl_hdr;
-       struct sk_buff_head *queue = &link->transmq;
-       int mcnt;
-       u16 seqno;
-
-       tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
-                     SYNCH_MSG, INT_H_SIZE, link->addr);
-       mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
-       msg_set_msgcnt(&tnl_hdr, mcnt);
-       msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);
-
-tunnel_queue:
-       skb_queue_walk(queue, skb) {
-               struct sk_buff *outskb;
-               struct tipc_msg *msg = buf_msg(skb);
-               u32 len = msg_size(msg);
+               l->silent_intv_cnt = 0;
+               l->stats.recv_states++;
+               if (msg_probe(hdr))
+                       l->stats.recv_probes++;
+               rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+               if (!link_is_up(l))
+                       break;
 
-               msg_set_ack(msg, mod(link->rcv_nxt - 1));
-               msg_set_bcast_ack(msg, link->owner->bclink.last_in);
-               msg_set_size(&tnl_hdr, len + INT_H_SIZE);
-               outskb = tipc_buf_acquire(len + INT_H_SIZE);
-               if (outskb == NULL) {
-                       pr_warn("%sunable to send duplicate msg\n",
-                               link_co_err);
-                       return;
+               /* Send NACK if peer has sent pkts we haven't received yet */
+               if (more(peers_snd_nxt, l->rcv_nxt))
+                       rcvgap = peers_snd_nxt - l->rcv_nxt;
+               if (rcvgap || (msg_probe(hdr)))
+                       tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
+                                                 0, 0, xmitq);
+               tipc_link_release_pkts(l, msg_ack(hdr));
+
+               /* If NACK, retransmit will now start at right position */
+               if (nacked_gap) {
+                       rc = tipc_link_retransm(l, nacked_gap, xmitq);
+                       l->stats.recv_nacks++;
                }
-               skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE);
-               skb_copy_to_linear_data_offset(outskb, INT_H_SIZE,
-                                              skb->data, len);
-               __tipc_link_xmit_skb(tnl, outskb);
-               if (!tipc_link_is_up(link))
-                       return;
-       }
-       if (queue == &link->backlogq)
-               return;
-       seqno = link->snd_nxt;
-       skb_queue_walk(&link->backlogq, skb) {
-               msg_set_seqno(buf_msg(skb), seqno);
-               seqno = mod(seqno + 1);
-       }
-       queue = &link->backlogq;
-       goto tunnel_queue;
-}
 
-/*  tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet
- *  Owner node is locked.
- */
-static bool tipc_link_failover_rcv(struct tipc_link *link,
-                                  struct sk_buff **skb)
-{
-       struct tipc_msg *msg = buf_msg(*skb);
-       struct sk_buff *iskb = NULL;
-       struct tipc_link *pl = NULL;
-       int bearer_id = msg_bearer_id(msg);
-       int pos = 0;
-
-       if (msg_type(msg) != FAILOVER_MSG) {
-               pr_warn("%sunknown tunnel pkt received\n", link_co_err);
-               goto exit;
-       }
-       if (bearer_id >= MAX_BEARERS)
-               goto exit;
-
-       if (bearer_id == link->bearer_id)
-               goto exit;
-
-       pl = link->owner->links[bearer_id];
-       if (pl && tipc_link_is_up(pl))
-               tipc_link_reset(pl);
-
-       if (link->failover_pkts == FIRST_FAILOVER)
-               link->failover_pkts = msg_msgcnt(msg);
-
-       /* Should we expect an inner packet? */
-       if (!link->failover_pkts)
-               goto exit;
-
-       if (!tipc_msg_extract(*skb, &iskb, &pos)) {
-               pr_warn("%sno inner failover pkt\n", link_co_err);
-               *skb = NULL;
-               goto exit;
-       }
-       link->failover_pkts--;
-       *skb = NULL;
-
-       /* Was this packet already delivered? */
-       if (less(buf_seqno(iskb), link->failover_checkpt)) {
-               kfree_skb(iskb);
-               iskb = NULL;
-               goto exit;
-       }
-       if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) {
-               link->stats.recv_fragments++;
-               tipc_buf_append(&link->failover_skb, &iskb);
+               tipc_link_advance_backlog(l, xmitq);
+               if (unlikely(!skb_queue_empty(&l->wakeupq)))
+                       link_prepare_wakeup(l);
        }
 exit:
-       if (!link->failover_pkts && pl)
-               pl->flags &= ~LINK_FAILINGOVER;
-       kfree_skb(*skb);
-       *skb = iskb;
-       return *skb;
-}
-
-static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
-{
-       unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
-
-       if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
-               return;
-
-       l_ptr->tolerance = tol;
-       l_ptr->keepalive_intv = msecs_to_jiffies(intv);
-       l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->keepalive_intv));
+       kfree_skb(skb);
+       return rc;
 }
 
 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
@@ -1743,7 +1449,7 @@ static struct tipc_node *tipc_link_find_owner(struct net *net,
        list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
                tipc_node_lock(n_ptr);
                for (i = 0; i < MAX_BEARERS; i++) {
-                       l_ptr = n_ptr->links[i];
+                       l_ptr = n_ptr->links[i].link;
                        if (l_ptr && !strcmp(l_ptr->name, link_name)) {
                                *bearer_id = i;
                                found_node = n_ptr;
@@ -1770,27 +1476,16 @@ static void link_reset_statistics(struct tipc_link *l_ptr)
        l_ptr->stats.recv_info = l_ptr->rcv_nxt;
 }
 
-static void link_print(struct tipc_link *l_ptr, const char *str)
+static void link_print(struct tipc_link *l, const char *str)
 {
-       struct tipc_net *tn = net_generic(l_ptr->owner->net, tipc_net_id);
-       struct tipc_bearer *b_ptr;
-
-       rcu_read_lock();
-       b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
-       if (b_ptr)
-               pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
-       rcu_read_unlock();
-
-       if (link_working_unknown(l_ptr))
-               pr_cont(":WU\n");
-       else if (link_reset_reset(l_ptr))
-               pr_cont(":RR\n");
-       else if (link_reset_unknown(l_ptr))
-               pr_cont(":RU\n");
-       else if (link_working_working(l_ptr))
-               pr_cont(":WW\n");
-       else
-               pr_cont("\n");
+       struct sk_buff *hskb = skb_peek(&l->transmq);
+       u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt;
+       u16 tail = l->snd_nxt - 1;
+
+       pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
+       pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
+               skb_queue_len(&l->transmq), head, tail,
+               skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
 }
 
 /* Parse and validate nested (link) properties valid for media, bearer and link
@@ -1865,7 +1560,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
 
        tipc_node_lock(node);
 
-       link = node->links[bearer_id];
+       link = node->links[bearer_id].link;
        if (!link) {
                res = -EINVAL;
                goto out;
@@ -1885,7 +1580,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
                        u32 tol;
 
                        tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
-                       link_set_supervision_props(link, tol);
+                       link->tolerance = tol;
                        tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
                }
                if (props[TIPC_NLA_PROP_PRIO]) {
@@ -2055,10 +1750,11 @@ static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
        for (i = *prev_link; i < MAX_BEARERS; i++) {
                *prev_link = i;
 
-               if (!node->links[i])
+               if (!node->links[i].link)
                        continue;
 
-               err = __tipc_nl_add_link(net, msg, node->links[i], NLM_F_MULTI);
+               err = __tipc_nl_add_link(net, msg,
+                                        node->links[i].link, NLM_F_MULTI);
                if (err)
                        return err;
        }
@@ -2172,7 +1868,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
                        return -EINVAL;
 
                tipc_node_lock(node);
-               link = node->links[bearer_id];
+               link = node->links[bearer_id].link;
                if (!link) {
                        tipc_node_unlock(node);
                        nlmsg_free(msg.skb);
@@ -2227,7 +1923,7 @@ int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
 
        tipc_node_lock(node);
 
-       link = node->links[bearer_id];
+       link = node->links[bearer_id].link;
        if (!link) {
                tipc_node_unlock(node);
                return -EINVAL;
index ae0a0ea572f2961aca2617f9244ea74ebba15c6a..39ff8b6919a4271d31892992098be0fcf98acd0f 100644 (file)
  */
 #define INVALID_LINK_SEQ 0x10000
 
-/* Link working states
+/* Link FSM events:
  */
-#define WORKING_WORKING 560810u
-#define WORKING_UNKNOWN 560811u
-#define RESET_UNKNOWN   560812u
-#define RESET_RESET     560813u
+enum {
+       LINK_ESTABLISH_EVT       = 0xec1ab1e,
+       LINK_PEER_RESET_EVT      = 0x9eed0e,
+       LINK_FAILURE_EVT         = 0xfa110e,
+       LINK_RESET_EVT           = 0x10ca1d0e,
+       LINK_FAILOVER_BEGIN_EVT  = 0xfa110bee,
+       LINK_FAILOVER_END_EVT    = 0xfa110ede,
+       LINK_SYNCH_BEGIN_EVT     = 0xc1ccbee,
+       LINK_SYNCH_END_EVT       = 0xc1ccede
+};
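
The event values double as hexspeak mnemonics rather than arbitrary constants: 0xfa110bee and 0xfa110ede, for instance, read roughly as "fail-o-be(gin)" and "fail-o-e(n)de", which makes raw values recognizable in traces and crash dumps.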
 
-/* Link endpoint execution states
+/* Events returned from link at packet reception or at timeout
  */
-#define LINK_STARTED     0x0001
-#define LINK_STOPPED     0x0002
-#define LINK_SYNCHING    0x0004
-#define LINK_FAILINGOVER 0x0008
+enum {
+       TIPC_LINK_UP_EVT       = 1,
+       TIPC_LINK_DOWN_EVT     = (1 << 1)
+};
 
 /* Starting value for maximum packet size negotiation on unicast links
  * (unless bearer MTU is less)
@@ -106,7 +112,6 @@ struct tipc_stats {
  * @timer: link timer
  * @owner: pointer to peer node
  * @refcnt: reference counter for permanent references (owner node & timer)
- * @flags: execution state flags for link endpoint instance
  * @peer_session: link session # being used by peer end of link
  * @peer_bearer_id: bearer id used by link's peer endpoint
  * @bearer_id: local bearer id used by link
@@ -143,20 +148,17 @@ struct tipc_stats {
 struct tipc_link {
        u32 addr;
        char name[TIPC_MAX_LINK_NAME];
-       struct tipc_media_addr media_addr;
-       struct timer_list timer;
+       struct tipc_media_addr *media_addr;
        struct tipc_node *owner;
-       struct kref ref;
 
        /* Management and link supervision data */
-       unsigned int flags;
        u32 peer_session;
        u32 peer_bearer_id;
        u32 bearer_id;
        u32 tolerance;
        unsigned long keepalive_intv;
        u32 abort_limit;
-       int state;
+       u32 state;
        u32 silent_intv_cnt;
        struct {
                unchar hdr[INT_H_SIZE];
@@ -165,12 +167,10 @@ struct tipc_link {
        struct tipc_msg *pmsg;
        u32 priority;
        char net_plane;
-       u16 synch_point;
 
-       /* Failover */
-       u16 failover_pkts;
-       u16 failover_checkpt;
-       struct sk_buff *failover_skb;
+       /* Failover/synch */
+       u16 drop_point;
+       struct sk_buff *failover_reasm_skb;
 
        /* Max packet negotiation */
        u16 mtu;
@@ -192,8 +192,8 @@ struct tipc_link {
        u16 rcv_nxt;
        u32 rcv_unacked;
        struct sk_buff_head deferdq;
-       struct sk_buff_head inputq;
-       struct sk_buff_head namedq;
+       struct sk_buff_head *inputq;
+       struct sk_buff_head *namedq;
 
        /* Congestion handling */
        struct sk_buff_head wakeupq;
@@ -205,28 +205,29 @@ struct tipc_link {
        struct tipc_stats stats;
 };
 
-struct tipc_port;
-
-struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
-                             struct tipc_bearer *b_ptr,
-                             const struct tipc_media_addr *media_addr);
-void tipc_link_delete(struct tipc_link *link);
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id);
-void tipc_link_failover_send_queue(struct tipc_link *l_ptr);
-void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest);
+bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
+                     u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
+                     struct sk_buff_head *inputq, struct sk_buff_head *namedq,
+                     struct tipc_link **link);
+void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
+                          int mtyp, struct sk_buff_head *xmitq);
+void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
+                                   struct sk_buff_head *xmitq);
+int tipc_link_fsm_evt(struct tipc_link *l, int evt);
 void tipc_link_reset_fragments(struct tipc_link *l_ptr);
-int tipc_link_is_up(struct tipc_link *l_ptr);
+bool tipc_link_is_up(struct tipc_link *l);
+bool tipc_link_is_reset(struct tipc_link *l);
+bool tipc_link_is_synching(struct tipc_link *l);
+bool tipc_link_is_failingover(struct tipc_link *l);
+bool tipc_link_is_blocked(struct tipc_link *l);
 int tipc_link_is_active(struct tipc_link *l_ptr);
 void tipc_link_purge_queues(struct tipc_link *l_ptr);
 void tipc_link_purge_backlog(struct tipc_link *l);
-void tipc_link_reset_all(struct tipc_node *node);
 void tipc_link_reset(struct tipc_link *l_ptr);
-int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
-                      u32 selector);
-int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dest,
-                  u32 selector);
 int __tipc_link_xmit(struct net *net, struct tipc_link *link,
                     struct sk_buff_head *list);
+int tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list,
+                  struct sk_buff_head *xmitq);
 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
                          u32 gap, u32 tolerance, u32 priority);
 void tipc_link_push_packets(struct tipc_link *l_ptr);
@@ -242,34 +243,8 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]);
-void link_prepare_wakeup(struct tipc_link *l);
-
-static inline u32 link_own_addr(struct tipc_link *l)
-{
-       return msg_prevnode(l->pmsg);
-}
-
-/*
- * Link status checking routines
- */
-static inline int link_working_working(struct tipc_link *l_ptr)
-{
-       return l_ptr->state == WORKING_WORKING;
-}
-
-static inline int link_working_unknown(struct tipc_link *l_ptr)
-{
-       return l_ptr->state == WORKING_UNKNOWN;
-}
-
-static inline int link_reset_unknown(struct tipc_link *l_ptr)
-{
-       return l_ptr->state == RESET_UNKNOWN;
-}
-
-static inline int link_reset_reset(struct tipc_link *l_ptr)
-{
-       return l_ptr->state == RESET_RESET;
-}
+int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq);
+int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
+                 struct sk_buff_head *xmitq);
 
 #endif
index 08b4cc7d496d94c80fb2fcacc30ade0192fb0dc8..562c926a51cc7baa859115b6a0d444f73febf357 100644 (file)
@@ -463,60 +463,72 @@ bool tipc_msg_make_bundle(struct sk_buff **skb,  struct tipc_msg *msg,
 
 /**
  * tipc_msg_reverse(): swap source and destination addresses and add error code
- * @buf:  buffer containing message to be reversed
- * @dnode: return value: node where to send message after reversal
- * @err:  error code to be set in message
- * Consumes buffer if failure
+ * @own_node: originating node id for reversed message
+ * @skb:  buffer containing message to be reversed; may be replaced.
+ * @err:  error code to be set in message, if any
+ * Consumes buffer on failure
  * Returns true if success, otherwise false
  */
-bool tipc_msg_reverse(u32 own_addr,  struct sk_buff *buf, u32 *dnode,
-                     int err)
+bool tipc_msg_reverse(u32 own_node,  struct sk_buff **skb, int err)
 {
-       struct tipc_msg *msg = buf_msg(buf);
+       struct sk_buff *_skb = *skb;
+       struct tipc_msg *hdr = buf_msg(_skb);
        struct tipc_msg ohdr;
-       uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE);
+       int dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE);
 
-       if (skb_linearize(buf))
+       if (skb_linearize(_skb))
                goto exit;
-       msg = buf_msg(buf);
-       if (msg_dest_droppable(msg))
+       hdr = buf_msg(_skb);
+       if (msg_dest_droppable(hdr))
                goto exit;
-       if (msg_errcode(msg))
+       if (msg_errcode(hdr))
                goto exit;
-       memcpy(&ohdr, msg, msg_hdr_sz(msg));
-       msg_set_errcode(msg, err);
-       msg_set_origport(msg, msg_destport(&ohdr));
-       msg_set_destport(msg, msg_origport(&ohdr));
-       msg_set_prevnode(msg, own_addr);
-       if (!msg_short(msg)) {
-               msg_set_orignode(msg, msg_destnode(&ohdr));
-               msg_set_destnode(msg, msg_orignode(&ohdr));
+
+       /* Take a copy of original header before altering message */
+       memcpy(&ohdr, hdr, msg_hdr_sz(hdr));
+
+       /* Never return SHORT header; expand by replacing buffer if necessary */
+       if (msg_short(hdr)) {
+               *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen);
+               if (!*skb)
+                       goto exit;
+               memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);
+               kfree_skb(_skb);
+               _skb = *skb;
+               hdr = buf_msg(_skb);
+               memcpy(hdr, &ohdr, BASIC_H_SIZE);
+               msg_set_hdr_sz(hdr, BASIC_H_SIZE);
        }
-       msg_set_size(msg, msg_hdr_sz(msg) + rdsz);
-       skb_trim(buf, msg_size(msg));
-       skb_orphan(buf);
-       *dnode = msg_orignode(&ohdr);
+
+       /* Now reverse the concerned fields */
+       msg_set_errcode(hdr, err);
+       msg_set_origport(hdr, msg_destport(&ohdr));
+       msg_set_destport(hdr, msg_origport(&ohdr));
+       msg_set_destnode(hdr, msg_prevnode(&ohdr));
+       msg_set_prevnode(hdr, own_node);
+       msg_set_orignode(hdr, own_node);
+       msg_set_size(hdr, msg_hdr_sz(hdr) + dlen);
+       skb_trim(_skb, msg_size(hdr));
+       skb_orphan(_skb);
        return true;
 exit:
-       kfree_skb(buf);
-       *dnode = 0;
+       kfree_skb(_skb);
+       *skb = NULL;
        return false;
 }
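
A sketch of the rejection pattern the new signature enables, grounded in the tipc_node_xmit_skb() helper this series also introduces (error code and selector here are illustrative):

    /* Bounce an undeliverable message back to its sender; note that
     * *skb may have been replaced by an expanded copy if the original
     * header was SHORT. */
    if (tipc_msg_reverse(tipc_own_addr(net), &skb, TIPC_ERR_NO_PORT))
            tipc_node_xmit_skb(net, skb, msg_destnode(buf_msg(skb)), 0);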
 
 /**
  * tipc_msg_lookup_dest(): try to find new destination for named message
  * @skb: the buffer containing the message.
- * @dnode: return value: next-hop node, if destination found
- * @err: return value: error code to use, if message to be rejected
+ * @err: error code to be used by caller if lookup fails
  * Does not consume buffer
  * Returns true if a destination is found, false otherwise
  */
-bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
-                         u32 *dnode, int *err)
+bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
 {
        struct tipc_msg *msg = buf_msg(skb);
-       u32 dport;
-       u32 own_addr = tipc_own_addr(net);
+       u32 dport, dnode;
+       u32 onode = tipc_own_addr(net);
 
        if (!msg_isdata(msg))
                return false;
@@ -529,15 +541,15 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
                return false;
        if (msg_reroute_cnt(msg))
                return false;
-       *dnode = addr_domain(net, msg_lookup_scope(msg));
+       dnode = addr_domain(net, msg_lookup_scope(msg));
        dport = tipc_nametbl_translate(net, msg_nametype(msg),
-                                      msg_nameinst(msg), dnode);
+                                      msg_nameinst(msg), &dnode);
        if (!dport)
                return false;
        msg_incr_reroute_cnt(msg);
-       if (*dnode != own_addr)
-               msg_set_prevnode(msg, own_addr);
-       msg_set_destnode(msg, *dnode);
+       if (dnode != onode)
+               msg_set_prevnode(msg, onode);
+       msg_set_destnode(msg, dnode);
        msg_set_destport(msg, dport);
        *err = TIPC_OK;
        return true;
index 19c45fb66238816f6084a706e9ab7784a31faa8c..a82c5848d4bc22129bd1e6ba7f677795febdc9e9 100644 (file)
@@ -38,6 +38,7 @@
 #define _TIPC_MSG_H
 
 #include <linux/tipc.h>
+#include "core.h"
 
 /*
  * Constants and routines used to read and write TIPC payload message headers
@@ -109,7 +110,6 @@ struct tipc_skb_cb {
        struct sk_buff *tail;
        bool validated;
        bool wakeup_pending;
-       bool bundling;
        u16 chain_sz;
        u16 chain_imp;
 };
@@ -558,15 +558,6 @@ static inline void msg_set_node_capabilities(struct tipc_msg *m, u32 n)
        msg_set_bits(m, 1, 15, 0x1fff, n);
 }
 
-static inline bool msg_dup(struct tipc_msg *m)
-{
-       if (likely(msg_user(m) != TUNNEL_PROTOCOL))
-               return false;
-       if (msg_type(m) != SYNCH_MSG)
-               return false;
-       return true;
-}
-
 /*
  * Word 2
  */
@@ -620,12 +611,12 @@ static inline void msg_set_fragm_no(struct tipc_msg *m, u32 n)
 }
 
 
-static inline u32 msg_next_sent(struct tipc_msg *m)
+static inline u16 msg_next_sent(struct tipc_msg *m)
 {
        return msg_bits(m, 4, 0, 0xffff);
 }
 
-static inline void msg_set_next_sent(struct tipc_msg *m, u32 n)
+static inline void msg_set_next_sent(struct tipc_msg *m, u16 n)
 {
        msg_set_bits(m, 4, 0, 0xffff, n);
 }
@@ -658,12 +649,12 @@ static inline void msg_set_link_selector(struct tipc_msg *m, u32 n)
 /*
  * Word 5
  */
-static inline u32 msg_session(struct tipc_msg *m)
+static inline u16 msg_session(struct tipc_msg *m)
 {
        return msg_bits(m, 5, 16, 0xffff);
 }
 
-static inline void msg_set_session(struct tipc_msg *m, u32 n)
+static inline void msg_set_session(struct tipc_msg *m, u16 n)
 {
        msg_set_bits(m, 5, 16, 0xffff, n);
 }
@@ -726,12 +717,12 @@ static inline char *msg_media_addr(struct tipc_msg *m)
 /*
  * Word 9
  */
-static inline u32 msg_msgcnt(struct tipc_msg *m)
+static inline u16 msg_msgcnt(struct tipc_msg *m)
 {
        return msg_bits(m, 9, 16, 0xffff);
 }
 
-static inline void msg_set_msgcnt(struct tipc_msg *m, u32 n)
+static inline void msg_set_msgcnt(struct tipc_msg *m, u16 n)
 {
        msg_set_bits(m, 9, 16, 0xffff, n);
 }
@@ -766,10 +757,25 @@ static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
        msg_set_bits(m, 9, 0, 0xffff, n);
 }
 
+static inline bool msg_peer_link_is_up(struct tipc_msg *m)
+{
+       if (likely(msg_user(m) != LINK_PROTOCOL))
+               return true;
+       if (msg_type(m) == STATE_MSG)
+               return true;
+       return false;
+}
+
+static inline bool msg_peer_node_is_up(struct tipc_msg *m)
+{
+       if (msg_peer_link_is_up(m))
+               return true;
+       return msg_redundant_link(m);
+}
+
 struct sk_buff *tipc_buf_acquire(u32 size);
 bool tipc_msg_validate(struct sk_buff *skb);
-bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,
-                     int err);
+bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err);
 void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
                   u32 hsize, u32 destnode);
 struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
@@ -782,8 +788,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
 bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
 int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
                   int offset, int dsz, int mtu, struct sk_buff_head *list);
-bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, u32 *dnode,
-                         int *err);
+bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err);
 struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
 
 static inline u16 buf_seqno(struct sk_buff *skb)
@@ -857,26 +862,65 @@ static inline struct sk_buff *tipc_skb_dequeue(struct sk_buff_head *list,
        return skb;
 }
 
-/* tipc_skb_queue_tail(): add buffer to tail of list;
+/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
  * @list: list to be appended to
- * @skb: buffer to append. Always appended
- * @dport: the destination port of the buffer
- * returns true if dport differs from previous destination
+ * @skb: buffer to add
+ * Returns true if the queue should be processed further, otherwise false
  */
-static inline bool tipc_skb_queue_tail(struct sk_buff_head *list,
-                                      struct sk_buff *skb, u32 dport)
+static inline bool __tipc_skb_queue_sorted(struct sk_buff_head *list,
+                                          struct sk_buff *skb)
 {
-       struct sk_buff *_skb = NULL;
-       bool rv = false;
+       struct sk_buff *_skb, *tmp;
+       struct tipc_msg *hdr = buf_msg(skb);
+       u16 seqno = msg_seqno(hdr);
 
-       spin_lock_bh(&list->lock);
-       _skb = skb_peek_tail(list);
-       if (!_skb || (msg_destport(buf_msg(_skb)) != dport) ||
-           (skb_queue_len(list) > 32))
-               rv = true;
+       if (skb_queue_empty(list) || (msg_user(hdr) == LINK_PROTOCOL)) {
+               __skb_queue_head(list, skb);
+               return true;
+       }
+       if (likely(less(seqno, buf_seqno(skb_peek(list))))) {
+               __skb_queue_head(list, skb);
+               return true;
+       }
+       if (!more(seqno, buf_seqno(skb_peek_tail(list)))) {
+               skb_queue_walk_safe(list, _skb, tmp) {
+                       if (likely(less(seqno, buf_seqno(_skb)))) {
+                               __skb_queue_before(list, _skb, skb);
+                               return true;
+                       }
+               }
+       }
        __skb_queue_tail(list, skb);
+       return false;
+}
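
A concrete trace of the contract, with rcv_nxt = 6 and packets arriving in the order 7, 9, 8, 6:

    7 -> queue empty, queued at head, returns true  (receive loop defers it: gap at 6)
    9 -> beyond tail 7, appended, returns false     (counted toward NACK pacing)
    8 -> inserted before 9, returns true            (queue now 7, 8, 9; still gapped)
    6 -> precedes head 7, queued at head, returns true; the loop drains 6..9 in order

The false return on beyond-tail arrivals is what drives the pacing in tipc_link_rcv(), which emits a STATE message once per TIPC_NACK_INTV deferred packets.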
+
+/* tipc_skb_queue_splice_tail - append an skb list to lock protected list
+ * @list: the new list to append. Not lock protected
+ * @head: target list. Lock protected.
+ */
+static inline void tipc_skb_queue_splice_tail(struct sk_buff_head *list,
+                                             struct sk_buff_head *head)
+{
+       spin_lock_bh(&head->lock);
+       skb_queue_splice_tail(list, head);
+       spin_unlock_bh(&head->lock);
+}
+
+/* tipc_skb_queue_splice_tail_init - merge two lock protected skb lists
+ * @list: the new list to add. Lock protected. Will be reinitialized
+ * @head: target list. Lock protected.
+ */
+static inline void tipc_skb_queue_splice_tail_init(struct sk_buff_head *list,
+                                                  struct sk_buff_head *head)
+{
+       struct sk_buff_head tmp;
+
+       __skb_queue_head_init(&tmp);
+
+       spin_lock_bh(&list->lock);
+       skb_queue_splice_tail_init(list, &tmp);
        spin_unlock_bh(&list->lock);
-       return rv;
+       tipc_skb_queue_splice_tail(&tmp, head);
 }
 
 #endif
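
A design note on the two splice helpers above: tipc_skb_queue_splice_tail_init() never holds both queue locks at once. It drains the source under its own lock into a private unlocked list, then takes only the target's lock to append, which keeps lock ordering trivial when link-private lists are merged into the shared, socket-facing input queues.
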
index 41e7b7e4dda0818469c17ff8b6e48aa1654a23ff..e6018b7eb1970dfc85bc7e0dc8945ccf45a72180 100644 (file)
@@ -96,13 +96,13 @@ void named_cluster_distribute(struct net *net, struct sk_buff *skb)
                dnode = node->addr;
                if (in_own_node(net, dnode))
                        continue;
-               if (!tipc_node_active_links(node))
+               if (!tipc_node_is_up(node))
                        continue;
                oskb = pskb_copy(skb, GFP_ATOMIC);
                if (!oskb)
                        break;
                msg_set_destnode(buf_msg(oskb), dnode);
-               tipc_link_xmit_skb(net, oskb, dnode, dnode);
+               tipc_node_xmit_skb(net, oskb, dnode, dnode);
        }
        rcu_read_unlock();
 
@@ -223,7 +223,7 @@ void tipc_named_node_up(struct net *net, u32 dnode)
                         &tn->nametbl->publ_list[TIPC_ZONE_SCOPE]);
        rcu_read_unlock();
 
-       tipc_link_xmit(net, &head, dnode, dnode);
+       tipc_node_xmit(net, &head, dnode, dnode);
 }
 
 static void tipc_publ_subscribe(struct net *net, struct publication *publ,
index 0b1d61a5f85334b3553780e8c0dd64c3f0549aa3..7c191641b44f64c080745df6615a8eccb237dd38 100644 (file)
 #include "name_distr.h"
 #include "socket.h"
 #include "bcast.h"
+#include "discover.h"
 
-static void node_lost_contact(struct tipc_node *n_ptr);
+/* Node FSM states and events:
+ */
+enum {
+       SELF_DOWN_PEER_DOWN    = 0xdd,
+       SELF_UP_PEER_UP        = 0xaa,
+       SELF_DOWN_PEER_LEAVING = 0xd1,
+       SELF_UP_PEER_COMING    = 0xac,
+       SELF_COMING_PEER_UP    = 0xca,
+       SELF_LEAVING_PEER_DOWN = 0x1d,
+       NODE_FAILINGOVER       = 0xf0,
+       NODE_SYNCHING          = 0xcc
+};
+
+enum {
+       SELF_ESTABL_CONTACT_EVT = 0xece,
+       SELF_LOST_CONTACT_EVT   = 0x1ce,
+       PEER_ESTABL_CONTACT_EVT = 0x9ece,
+       PEER_LOST_CONTACT_EVT   = 0x91ce,
+       NODE_FAILOVER_BEGIN_EVT = 0xfbe,
+       NODE_FAILOVER_END_EVT   = 0xfee,
+       NODE_SYNCH_BEGIN_EVT    = 0xcbe,
+       NODE_SYNCH_END_EVT      = 0xcee
+};
+
+static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
+                                 struct sk_buff_head *xmitq,
+                                 struct tipc_media_addr **maddr);
+static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
+                               bool delete);
+static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
 static void node_established_contact(struct tipc_node *n_ptr);
 static void tipc_node_delete(struct tipc_node *node);
+static void tipc_node_timeout(unsigned long data);
+static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
 
 struct tipc_sock_conn {
        u32 port;
@@ -110,7 +142,7 @@ struct tipc_node *tipc_node_find(struct net *net, u32 addr)
        return NULL;
 }
 
-struct tipc_node *tipc_node_create(struct net *net, u32 addr)
+struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *n_ptr, *temp_node;
@@ -126,12 +158,14 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr)
        }
        n_ptr->addr = addr;
        n_ptr->net = net;
+       n_ptr->capabilities = capabilities;
        kref_init(&n_ptr->kref);
        spin_lock_init(&n_ptr->lock);
        INIT_HLIST_NODE(&n_ptr->hash);
        INIT_LIST_HEAD(&n_ptr->list);
        INIT_LIST_HEAD(&n_ptr->publ_list);
        INIT_LIST_HEAD(&n_ptr->conn_sks);
+       skb_queue_head_init(&n_ptr->bclink.namedq);
        __skb_queue_head_init(&n_ptr->bclink.deferdq);
        hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
        list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
@@ -139,14 +173,32 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr)
                        break;
        }
        list_add_tail_rcu(&n_ptr->list, &temp_node->list);
-       n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
+       n_ptr->state = SELF_DOWN_PEER_LEAVING;
        n_ptr->signature = INVALID_NODE_SIG;
+       n_ptr->active_links[0] = INVALID_BEARER_ID;
+       n_ptr->active_links[1] = INVALID_BEARER_ID;
        tipc_node_get(n_ptr);
+       setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr);
+       n_ptr->keepalive_intv = U32_MAX;
 exit:
        spin_unlock_bh(&tn->node_list_lock);
        return n_ptr;
 }
 
+static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
+{
+       unsigned long tol = l->tolerance;
+       unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
+       unsigned long keepalive_intv = msecs_to_jiffies(intv);
+
+       /* Link with lowest tolerance determines timer interval */
+       if (keepalive_intv < n->keepalive_intv)
+               n->keepalive_intv = keepalive_intv;
+
+       /* Ensure link's abort limit corresponds to current interval */
+       l->abort_limit = l->tolerance / jiffies_to_msecs(n->keepalive_intv);
+}
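
Worked example, assuming a link tolerance of 1500 ms: the candidate interval
is min(1500 / 4, 500) = 375 ms. Since n->keepalive_intv starts at U32_MAX,
the first link always lowers it, and the abort limit for this link becomes
1500 / 375 = 4 unanswered probe intervals.
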
+
 static void tipc_node_delete(struct tipc_node *node)
 {
        list_del_rcu(&node->list);
@@ -160,8 +212,11 @@ void tipc_node_stop(struct net *net)
        struct tipc_node *node, *t_node;
 
        spin_lock_bh(&tn->node_list_lock);
-       list_for_each_entry_safe(node, t_node, &tn->node_list, list)
+       list_for_each_entry_safe(node, t_node, &tn->node_list, list) {
+               if (del_timer(&node->timer))
+                       tipc_node_put(node);
                tipc_node_put(node);
+       }
        spin_unlock_bh(&tn->node_list_lock);
 }
 
@@ -219,158 +274,547 @@ void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
        tipc_node_put(node);
 }
 
+/* tipc_node_timeout - handle expiration of node timer
+ */
+static void tipc_node_timeout(unsigned long data)
+{
+       struct tipc_node *n = (struct tipc_node *)data;
+       struct tipc_link_entry *le;
+       struct sk_buff_head xmitq;
+       int bearer_id;
+       int rc = 0;
+
+       __skb_queue_head_init(&xmitq);
+
+       for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+               tipc_node_lock(n);
+               le = &n->links[bearer_id];
+               if (le->link) {
+                       /* Link tolerance may change asynchronously: */
+                       tipc_node_calculate_timer(n, le->link);
+                       rc = tipc_link_timeout(le->link, &xmitq);
+               }
+               tipc_node_unlock(n);
+               tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
+               if (rc & TIPC_LINK_DOWN_EVT)
+                       tipc_node_link_down(n, bearer_id, false);
+       }
+       if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
+               tipc_node_get(n);
+       tipc_node_put(n);
+}
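
The timer keeps its node alive through reference counting: mod_timer()
returns 0 when no timer was pending, meaning this call armed a fresh one, so
an extra reference is taken for it; the trailing tipc_node_put() then drops
the reference that covered the expiry just handled. tipc_node_stop() mirrors
this by dropping a reference whenever del_timer() cancels a pending timer.
A generic sketch of the pattern, with hypothetical obj_get()/obj_put()/
do_work() helpers:

    /* Self-rearming timer holding one reference per pending instance */
    static void obj_timeout(unsigned long data)
    {
            struct obj *o = (struct obj *)data;

            do_work(o);                     /* hypothetical per-tick work */
            if (!mod_timer(&o->timer, jiffies + o->intv))
                    obj_get(o);             /* new pending timer => new reference */
            obj_put(o);                     /* reference for this expiry is done */
    }
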
+
 /**
- * tipc_node_link_up - handle addition of link
- *
+ * __tipc_node_link_up - handle addition of link
+ * Node lock must be held by caller
  * Link becomes active (alone or shared) or standby, depending on its priority.
  */
-void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
+static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
+                               struct sk_buff_head *xmitq)
 {
-       struct tipc_link **active = &n_ptr->active_links[0];
+       int *slot0 = &n->active_links[0];
+       int *slot1 = &n->active_links[1];
+       struct tipc_link *ol = node_active_link(n, 0);
+       struct tipc_link *nl = n->links[bearer_id].link;
 
-       n_ptr->working_links++;
-       n_ptr->action_flags |= TIPC_NOTIFY_LINK_UP;
-       n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
+       if (!nl || !tipc_link_is_up(nl))
+               return;
 
-       pr_debug("Established link <%s> on network plane %c\n",
-                l_ptr->name, l_ptr->net_plane);
+       n->working_links++;
+       n->action_flags |= TIPC_NOTIFY_LINK_UP;
+       n->link_id = nl->peer_bearer_id << 16 | bearer_id;
 
-       if (!active[0]) {
-               active[0] = active[1] = l_ptr;
-               node_established_contact(n_ptr);
-               goto exit;
-       }
-       if (l_ptr->priority < active[0]->priority) {
-               pr_debug("New link <%s> becomes standby\n", l_ptr->name);
-               goto exit;
+       /* Leave room for tunnel header when returning 'mtu' to users: */
+       n->links[bearer_id].mtu = nl->mtu - INT_H_SIZE;
+
+       tipc_bearer_add_dest(n->net, bearer_id, n->addr);
+
+       pr_debug("Established link <%s> on network plane %c\n",
+                nl->name, nl->net_plane);
+
+       /* First link? => give it both slots */
+       if (!ol) {
+               *slot0 = bearer_id;
+               *slot1 = bearer_id;
+               tipc_link_build_bcast_sync_msg(nl, xmitq);
+               node_established_contact(n);
+               return;
        }
-       tipc_link_dup_queue_xmit(active[0], l_ptr);
-       if (l_ptr->priority == active[0]->priority) {
-               active[0] = l_ptr;
-               goto exit;
+
+       /* Second link => redistribute slots */
+       if (nl->priority > ol->priority) {
+               pr_debug("Old link <%s> becomes standby\n", ol->name);
+               *slot0 = bearer_id;
+               *slot1 = bearer_id;
+       } else if (nl->priority == ol->priority) {
+               *slot0 = bearer_id;
+       } else {
+               pr_debug("New link <%s> is standby\n", nl->name);
        }
-       pr_debug("Old link <%s> becomes standby\n", active[0]->name);
-       if (active[1] != active[0])
-               pr_debug("Old link <%s> becomes standby\n", active[1]->name);
-       active[0] = active[1] = l_ptr;
-exit:
-       /* Leave room for changeover header when returning 'mtu' to users: */
-       n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
-       n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
+
+       /* Prepare synchronization with first link */
+       tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
 }
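
Worked example of the slot logic: bearer A (priority 10) comes up first and
takes both slots, {A, A}. If bearer B then comes up with priority 20, it
claims both slots, {B, B}, and A becomes standby. With equal priority 10 the
result is {B, A}, so both links are active and traffic is split by selector
parity. With priority 5, B stays standby and the slots remain {A, A}.
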
 
 /**
- * node_select_active_links - select active link
+ * tipc_node_link_up - handle addition of link
+ *
+ * Link becomes active (alone or shared) or standby, depending on its priority.
  */
-static void node_select_active_links(struct tipc_node *n_ptr)
+static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
+                             struct sk_buff_head *xmitq)
 {
-       struct tipc_link **active = &n_ptr->active_links[0];
-       u32 i;
-       u32 highest_prio = 0;
+       tipc_node_lock(n);
+       __tipc_node_link_up(n, bearer_id, xmitq);
+       tipc_node_unlock(n);
+}
 
-       active[0] = active[1] = NULL;
+/**
+ * __tipc_node_link_down - handle loss of link
+ */
+static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
+                                 struct sk_buff_head *xmitq,
+                                 struct tipc_media_addr **maddr)
+{
+       struct tipc_link_entry *le = &n->links[*bearer_id];
+       int *slot0 = &n->active_links[0];
+       int *slot1 = &n->active_links[1];
+       int i, highest = 0;
+       struct tipc_link *l, *_l, *tnl;
+
+       l = n->links[*bearer_id].link;
+       if (!l || tipc_link_is_reset(l))
+               return;
 
-       for (i = 0; i < MAX_BEARERS; i++) {
-               struct tipc_link *l_ptr = n_ptr->links[i];
+       n->working_links--;
+       n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
+       n->link_id = l->peer_bearer_id << 16 | *bearer_id;
 
-               if (!l_ptr || !tipc_link_is_up(l_ptr) ||
-                   (l_ptr->priority < highest_prio))
-                       continue;
+       tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);
+
+       pr_debug("Lost link <%s> on network plane %c\n",
+                l->name, l->net_plane);
 
-               if (l_ptr->priority > highest_prio) {
-                       highest_prio = l_ptr->priority;
-                       active[0] = active[1] = l_ptr;
-               } else {
-                       active[1] = l_ptr;
+       /* Select new active link if any available */
+       *slot0 = INVALID_BEARER_ID;
+       *slot1 = INVALID_BEARER_ID;
+       for (i = 0; i < MAX_BEARERS; i++) {
+               _l = n->links[i].link;
+               if (!_l || !tipc_link_is_up(_l))
+                       continue;
+               if (_l == l)
+                       continue;
+               if (_l->priority < highest)
+                       continue;
+               if (_l->priority > highest) {
+                       highest = _l->priority;
+                       *slot0 = i;
+                       *slot1 = i;
+                       continue;
                }
+               *slot1 = i;
+       }
+
+       if (!tipc_node_is_up(n)) {
+               tipc_link_reset(l);
+               node_lost_contact(n, &le->inputq);
+               return;
        }
+
+       /* There is still a working link => initiate failover */
+       tnl = node_active_link(n, 0);
+       n->sync_point = tnl->rcv_nxt + (U16_MAX / 2 - 1);
+       tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
+       tipc_link_reset(l);
+       tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
+       tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
+       *maddr = &n->links[tnl->bearer_id].maddr;
+       *bearer_id = tnl->bearer_id;
 }
 
-/**
- * tipc_node_link_down - handle loss of link
- */
-void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
+static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
 {
-       struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
-       struct tipc_link **active;
+       struct tipc_link_entry *le = &n->links[bearer_id];
+       struct tipc_media_addr *maddr;
+       struct sk_buff_head xmitq;
+
+       __skb_queue_head_init(&xmitq);
+
+       tipc_node_lock(n);
+       __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
+       if (delete && le->link) {
+               kfree(le->link);
+               le->link = NULL;
+               n->link_cnt--;
+       }
+       tipc_node_unlock(n);
 
-       n_ptr->working_links--;
-       n_ptr->action_flags |= TIPC_NOTIFY_LINK_DOWN;
-       n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
+       tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
+       tipc_sk_rcv(n->net, &le->inputq);
+}
 
-       if (!tipc_link_is_active(l_ptr)) {
-               pr_debug("Lost standby link <%s> on network plane %c\n",
-                        l_ptr->name, l_ptr->net_plane);
-               return;
-       }
-       pr_debug("Lost link <%s> on network plane %c\n",
-                l_ptr->name, l_ptr->net_plane);
-
-       active = &n_ptr->active_links[0];
-       if (active[0] == l_ptr)
-               active[0] = active[1];
-       if (active[1] == l_ptr)
-               active[1] = active[0];
-       if (active[0] == l_ptr)
-               node_select_active_links(n_ptr);
-       if (tipc_node_is_up(n_ptr))
-               tipc_link_failover_send_queue(l_ptr);
-       else
-               node_lost_contact(n_ptr);
-
-       /* Leave room for changeover header when returning 'mtu' to users: */
-       if (active[0]) {
-               n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
-               n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
+bool tipc_node_is_up(struct tipc_node *n)
+{
+       return n->active_links[0] != INVALID_BEARER_ID;
+}
+
+void tipc_node_check_dest(struct net *net, u32 onode,
+                         struct tipc_bearer *b,
+                         u16 capabilities, u32 signature,
+                         struct tipc_media_addr *maddr,
+                         bool *respond, bool *dupl_addr)
+{
+       struct tipc_node *n;
+       struct tipc_link *l;
+       struct tipc_link_entry *le;
+       bool addr_match = false;
+       bool sign_match = false;
+       bool link_up = false;
+       bool accept_addr = false;
+       bool reset = true;
+
+       *dupl_addr = false;
+       *respond = false;
+
+       n = tipc_node_create(net, onode, capabilities);
+       if (!n)
                return;
+
+       tipc_node_lock(n);
+
+       le = &n->links[b->identity];
+
+       /* Prepare to validate requesting node's signature and media address */
+       l = le->link;
+       link_up = l && tipc_link_is_up(l);
+       addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
+       sign_match = (signature == n->signature);
+
+       /* These three flags give us eight permutations: */
+
+       if (sign_match && addr_match && link_up) {
+               /* All is fine. Do nothing. */
+               reset = false;
+       } else if (sign_match && addr_match && !link_up) {
+               /* Respond. The link will come up in due time */
+               *respond = true;
+       } else if (sign_match && !addr_match && link_up) {
+               /* Peer has changed i/f address without rebooting.
+                * If so, the link will reset soon, and the next
+                * discovery will be accepted. So we can ignore it.
+                * It may also be an cloned or malicious peer having
+                * chosen the same node address and signature as an
+                * existing one.
+                * Ignore requests until the link goes down, if ever.
+                */
+               *dupl_addr = true;
+       } else if (sign_match && !addr_match && !link_up) {
+               /* Peer link has changed i/f address without rebooting.
+                * It may also be a cloned or malicious peer; we can't
+                * distinguish between the two.
+                * The signature is correct, so we must accept.
+                */
+               accept_addr = true;
+               *respond = true;
+       } else if (!sign_match && addr_match && link_up) {
+               /* Peer node rebooted. Two possibilities:
+                *  - Delayed re-discovery; this link endpoint has already
+                *    reset and re-established contact with the peer, before
+                *    receiving a discovery message from that node.
+                *    (The peer happened to receive one from this node first).
+                *  - The peer came back so fast that our side has not
+                *    discovered it yet. Probing from this side will soon
+                *    reset the link, since there can be no working link
+                *    endpoint at the peer end, and the link will re-establish.
+                *  Accept the signature, since it comes from a known peer.
+                */
+               n->signature = signature;
+       } else if (!sign_match && addr_match && !link_up) {
+               /*  The peer node has rebooted.
+                *  Accept signature, since it is a known peer.
+                */
+               n->signature = signature;
+               *respond = true;
+       } else if (!sign_match && !addr_match && link_up) {
+               /* Peer rebooted with new address, or a new/duplicate peer.
+                * Ignore until the link goes down, if ever.
+                */
+               *dupl_addr = true;
+       } else if (!sign_match && !addr_match && !link_up) {
+               /* Peer rebooted with new address, or it is a new peer.
+                * Accept signature and address.
+                */
+               n->signature = signature;
+               accept_addr = true;
+               *respond = true;
        }
-       /* Loopback link went down? No fragmentation needed from now on. */
-       if (n_ptr->addr == tn->own_addr) {
-               n_ptr->act_mtus[0] = MAX_MSG_SIZE;
-               n_ptr->act_mtus[1] = MAX_MSG_SIZE;
+
+       if (!accept_addr)
+               goto exit;
+
+       /* Now create new link if not already existing */
+       if (!l) {
+               if (n->link_cnt == 2) {
+                       pr_warn("Cannot establish 3rd link to %x\n", n->addr);
+                       goto exit;
+               }
+               if (!tipc_link_create(n, b, mod(tipc_net(net)->random),
+                                     tipc_own_addr(net), onode, &le->maddr,
+                                     &le->inputq, &n->bclink.namedq, &l)) {
+                       *respond = false;
+                       goto exit;
+               }
+               tipc_link_reset(l);
+               le->link = l;
+               n->link_cnt++;
+               tipc_node_calculate_timer(n, l);
+               if (n->link_cnt == 1)
+                       if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
+                               tipc_node_get(n);
        }
+       memcpy(&le->maddr, maddr, sizeof(*maddr));
+exit:
+       tipc_node_unlock(n);
+       if (reset)
+               tipc_node_link_down(n, b->identity, false);
+       tipc_node_put(n);
 }
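
The eight permutations handled above collapse to the following table:

    sign   addr   link    action
    match  match   up     all is fine, do nothing
    match  match  down    respond; link will come up in due time
    match  diff    up     ignore, flag duplicate address
    match  diff   down    accept new address, respond
    diff   match   up     accept new signature (peer rebooted)
    diff   match  down    accept new signature, respond
    diff   diff    up     ignore, flag duplicate address
    diff   diff   down    accept signature and address, respond
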
 
-int tipc_node_active_links(struct tipc_node *n_ptr)
+void tipc_node_delete_links(struct net *net, int bearer_id)
 {
-       return n_ptr->active_links[0] != NULL;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_node *n;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(n, &tn->node_list, list) {
+               tipc_node_link_down(n, bearer_id, true);
+       }
+       rcu_read_unlock();
 }
 
-int tipc_node_is_up(struct tipc_node *n_ptr)
+static void tipc_node_reset_links(struct tipc_node *n)
 {
-       return tipc_node_active_links(n_ptr);
+       char addr_string[16];
+       int i;
+
+       pr_warn("Resetting all links to %s\n",
+               tipc_addr_string_fill(addr_string, n->addr));
+
+       for (i = 0; i < MAX_BEARERS; i++) {
+               tipc_node_link_down(n, i, false);
+       }
 }
 
-void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
+/* tipc_node_fsm_evt - node finite state machine
+ * Determines when contact is allowed with peer node
+ */
+static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
 {
-       n_ptr->links[l_ptr->bearer_id] = l_ptr;
-       n_ptr->link_cnt++;
+       int state = n->state;
+
+       switch (state) {
+       case SELF_DOWN_PEER_DOWN:
+               switch (evt) {
+               case SELF_ESTABL_CONTACT_EVT:
+                       state = SELF_UP_PEER_COMING;
+                       break;
+               case PEER_ESTABL_CONTACT_EVT:
+                       state = SELF_COMING_PEER_UP;
+                       break;
+               case SELF_LOST_CONTACT_EVT:
+               case PEER_LOST_CONTACT_EVT:
+                       break;
+               case NODE_SYNCH_END_EVT:
+               case NODE_SYNCH_BEGIN_EVT:
+               case NODE_FAILOVER_BEGIN_EVT:
+               case NODE_FAILOVER_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case SELF_UP_PEER_UP:
+               switch (evt) {
+               case SELF_LOST_CONTACT_EVT:
+                       state = SELF_DOWN_PEER_LEAVING;
+                       break;
+               case PEER_LOST_CONTACT_EVT:
+                       state = SELF_LEAVING_PEER_DOWN;
+                       break;
+               case NODE_SYNCH_BEGIN_EVT:
+                       state = NODE_SYNCHING;
+                       break;
+               case NODE_FAILOVER_BEGIN_EVT:
+                       state = NODE_FAILINGOVER;
+                       break;
+               case SELF_ESTABL_CONTACT_EVT:
+               case PEER_ESTABL_CONTACT_EVT:
+               case NODE_SYNCH_END_EVT:
+               case NODE_FAILOVER_END_EVT:
+                       break;
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case SELF_DOWN_PEER_LEAVING:
+               switch (evt) {
+               case PEER_LOST_CONTACT_EVT:
+                       state = SELF_DOWN_PEER_DOWN;
+                       break;
+               case SELF_ESTABL_CONTACT_EVT:
+               case PEER_ESTABL_CONTACT_EVT:
+               case SELF_LOST_CONTACT_EVT:
+                       break;
+               case NODE_SYNCH_END_EVT:
+               case NODE_SYNCH_BEGIN_EVT:
+               case NODE_FAILOVER_BEGIN_EVT:
+               case NODE_FAILOVER_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case SELF_UP_PEER_COMING:
+               switch (evt) {
+               case PEER_ESTABL_CONTACT_EVT:
+                       state = SELF_UP_PEER_UP;
+                       break;
+               case SELF_LOST_CONTACT_EVT:
+                       state = SELF_DOWN_PEER_LEAVING;
+                       break;
+               case SELF_ESTABL_CONTACT_EVT:
+               case PEER_LOST_CONTACT_EVT:
+                       break;
+               case NODE_SYNCH_END_EVT:
+               case NODE_SYNCH_BEGIN_EVT:
+               case NODE_FAILOVER_BEGIN_EVT:
+               case NODE_FAILOVER_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case SELF_COMING_PEER_UP:
+               switch (evt) {
+               case SELF_ESTABL_CONTACT_EVT:
+                       state = SELF_UP_PEER_UP;
+                       break;
+               case PEER_LOST_CONTACT_EVT:
+                       state = SELF_LEAVING_PEER_DOWN;
+                       break;
+               case SELF_LOST_CONTACT_EVT:
+               case PEER_ESTABL_CONTACT_EVT:
+                       break;
+               case NODE_SYNCH_END_EVT:
+               case NODE_SYNCH_BEGIN_EVT:
+               case NODE_FAILOVER_BEGIN_EVT:
+               case NODE_FAILOVER_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case SELF_LEAVING_PEER_DOWN:
+               switch (evt) {
+               case SELF_LOST_CONTACT_EVT:
+                       state = SELF_DOWN_PEER_DOWN;
+                       break;
+               case SELF_ESTABL_CONTACT_EVT:
+               case PEER_ESTABL_CONTACT_EVT:
+               case PEER_LOST_CONTACT_EVT:
+                       break;
+               case NODE_SYNCH_END_EVT:
+               case NODE_SYNCH_BEGIN_EVT:
+               case NODE_FAILOVER_BEGIN_EVT:
+               case NODE_FAILOVER_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case NODE_FAILINGOVER:
+               switch (evt) {
+               case SELF_LOST_CONTACT_EVT:
+                       state = SELF_DOWN_PEER_LEAVING;
+                       break;
+               case PEER_LOST_CONTACT_EVT:
+                       state = SELF_LEAVING_PEER_DOWN;
+                       break;
+               case NODE_FAILOVER_END_EVT:
+                       state = SELF_UP_PEER_UP;
+                       break;
+               case NODE_FAILOVER_BEGIN_EVT:
+               case SELF_ESTABL_CONTACT_EVT:
+               case PEER_ESTABL_CONTACT_EVT:
+                       break;
+               case NODE_SYNCH_BEGIN_EVT:
+               case NODE_SYNCH_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case NODE_SYNCHING:
+               switch (evt) {
+               case SELF_LOST_CONTACT_EVT:
+                       state = SELF_DOWN_PEER_LEAVING;
+                       break;
+               case PEER_LOST_CONTACT_EVT:
+                       state = SELF_LEAVING_PEER_DOWN;
+                       break;
+               case NODE_SYNCH_END_EVT:
+                       state = SELF_UP_PEER_UP;
+                       break;
+               case NODE_FAILOVER_BEGIN_EVT:
+                       state = NODE_FAILINGOVER;
+                       break;
+               case NODE_SYNCH_BEGIN_EVT:
+               case SELF_ESTABL_CONTACT_EVT:
+               case PEER_ESTABL_CONTACT_EVT:
+                       break;
+               case NODE_FAILOVER_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       default:
+               pr_err("Unknown node fsm state %x\n", state);
+               break;
+       }
+       n->state = state;
+       return;
+
+illegal_evt:
+       pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
 }
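
The state values are mnemonic hex, one nibble per side: d for down, a for up,
c for coming and 1 for leaving, plus 0xf0 for failover and 0xcc for synch.
A normal establishment therefore traces as SELF_DOWN_PEER_DOWN ->
SELF_UP_PEER_COMING on SELF_ESTABL_CONTACT_EVT, then -> SELF_UP_PEER_UP on
PEER_ESTABL_CONTACT_EVT, i.e. 0xdd -> 0xac -> 0xaa.
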
 
-void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
+bool tipc_node_filter_pkt(struct tipc_node *n, struct tipc_msg *hdr)
 {
-       int i;
+       int state = n->state;
 
-       for (i = 0; i < MAX_BEARERS; i++) {
-               if (l_ptr != n_ptr->links[i])
-                       continue;
-               n_ptr->links[i] = NULL;
-               n_ptr->link_cnt--;
+       if (likely(state == SELF_UP_PEER_UP))
+               return true;
+
+       if (state == SELF_LEAVING_PEER_DOWN)
+               return false;
+
+       if (state == SELF_DOWN_PEER_LEAVING) {
+               if (msg_peer_node_is_up(hdr))
+                       return false;
        }
+
+       return true;
 }
 
 static void node_established_contact(struct tipc_node *n_ptr)
 {
+       tipc_node_fsm_evt(n_ptr, SELF_ESTABL_CONTACT_EVT);
        n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
        n_ptr->bclink.oos_state = 0;
        n_ptr->bclink.acked = tipc_bclink_get_last_sent(n_ptr->net);
        tipc_bclink_add_node(n_ptr->net, n_ptr->addr);
 }
 
-static void node_lost_contact(struct tipc_node *n_ptr)
+static void node_lost_contact(struct tipc_node *n_ptr,
+                             struct sk_buff_head *inputq)
 {
        char addr_string[16];
        struct tipc_sock_conn *conn, *safe;
+       struct tipc_link *l;
        struct list_head *conns = &n_ptr->conn_sks;
        struct sk_buff *skb;
        struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
@@ -396,21 +840,13 @@ static void node_lost_contact(struct tipc_node *n_ptr)
 
        /* Abort any ongoing link failover */
        for (i = 0; i < MAX_BEARERS; i++) {
-               struct tipc_link *l_ptr = n_ptr->links[i];
-               if (!l_ptr)
-                       continue;
-               l_ptr->flags &= ~LINK_FAILINGOVER;
-               l_ptr->failover_checkpt = 0;
-               l_ptr->failover_pkts = 0;
-               kfree_skb(l_ptr->failover_skb);
-               l_ptr->failover_skb = NULL;
-               tipc_link_reset_fragments(l_ptr);
+               l = n_ptr->links[i].link;
+               if (l)
+                       tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
        }
 
-       n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
-
        /* Prevent re-contact with node until cleanup is done */
-       n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN;
+       tipc_node_fsm_evt(n_ptr, SELF_LOST_CONTACT_EVT);
 
        /* Notify publications from this node */
        n_ptr->action_flags |= TIPC_NOTIFY_NODE_DOWN;
@@ -421,10 +857,8 @@ static void node_lost_contact(struct tipc_node *n_ptr)
                                      SHORT_H_SIZE, 0, tn->own_addr,
                                      conn->peer_node, conn->port,
                                      conn->peer_port, TIPC_ERR_NO_NODE);
-               if (likely(skb)) {
-                       skb_queue_tail(n_ptr->inputq, skb);
-                       n_ptr->action_flags |= TIPC_MSG_EVT;
-               }
+               if (likely(skb))
+                       skb_queue_tail(inputq, skb);
                list_del(&conn->list);
                kfree(conn);
        }
@@ -453,7 +887,7 @@ int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
                goto exit;
 
        tipc_node_lock(node);
-       link = node->links[bearer_id];
+       link = node->links[bearer_id].link;
        if (link) {
                strncpy(linkname, link->name, len);
                err = 0;
@@ -471,27 +905,20 @@ void tipc_node_unlock(struct tipc_node *node)
        u32 flags = node->action_flags;
        u32 link_id = 0;
        struct list_head *publ_list;
-       struct sk_buff_head *inputq = node->inputq;
-       struct sk_buff_head *namedq;
 
-       if (likely(!flags || (flags == TIPC_MSG_EVT))) {
-               node->action_flags = 0;
+       if (likely(!flags)) {
                spin_unlock_bh(&node->lock);
-               if (flags == TIPC_MSG_EVT)
-                       tipc_sk_rcv(net, inputq);
                return;
        }
 
        addr = node->addr;
        link_id = node->link_id;
-       namedq = node->namedq;
        publ_list = &node->publ_list;
 
-       node->action_flags &= ~(TIPC_MSG_EVT |
-                               TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
+       node->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
                                TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP |
                                TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT |
-                               TIPC_NAMED_MSG_EVT | TIPC_BCAST_RESET);
+                               TIPC_BCAST_RESET);
 
        spin_unlock_bh(&node->lock);
 
@@ -512,17 +939,11 @@ void tipc_node_unlock(struct tipc_node *node)
                tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
                                      link_id, addr);
 
-       if (flags & TIPC_MSG_EVT)
-               tipc_sk_rcv(net, inputq);
-
-       if (flags & TIPC_NAMED_MSG_EVT)
-               tipc_named_rcv(net, namedq);
-
        if (flags & TIPC_BCAST_MSG_EVT)
                tipc_bclink_input(net);
 
        if (flags & TIPC_BCAST_RESET)
-               tipc_link_reset_all(node);
+               tipc_node_reset_links(node);
 }
 
 /* Caller should hold node lock for the passed node */
@@ -559,6 +980,279 @@ msg_full:
        return -EMSGSIZE;
 }
 
+static struct tipc_link *tipc_node_select_link(struct tipc_node *n, int sel,
+                                              int *bearer_id,
+                                              struct tipc_media_addr **maddr)
+{
+       int id = n->active_links[sel & 1];
+
+       if (unlikely(id < 0))
+               return NULL;
+
+       *bearer_id = id;
+       *maddr = &n->links[id].maddr;
+       return n->links[id].link;
+}
+
+/**
+ * tipc_node_xmit() is the general link-level function for message sending
+ * @net: the applicable net namespace
+ * @list: chain of buffers containing message
+ * @dnode: address of destination node
+ * @selector: a number used for deterministic link selection
+ * Consumes the buffer chain, except when returning -ELINKCONG
+ * Returns 0 on success, otherwise errno: -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE
+ */
+int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
+                  u32 dnode, int selector)
+{
+       struct tipc_link *l = NULL;
+       struct tipc_node *n;
+       struct sk_buff_head xmitq;
+       struct tipc_media_addr *maddr;
+       int bearer_id;
+       int rc = -EHOSTUNREACH;
+
+       __skb_queue_head_init(&xmitq);
+       n = tipc_node_find(net, dnode);
+       if (likely(n)) {
+               tipc_node_lock(n);
+               l = tipc_node_select_link(n, selector, &bearer_id, &maddr);
+               if (likely(l))
+                       rc = tipc_link_xmit(l, list, &xmitq);
+               tipc_node_unlock(n);
+               if (unlikely(rc == -ENOBUFS))
+                       tipc_node_link_down(n, bearer_id, false);
+               tipc_node_put(n);
+       }
+       if (likely(!rc)) {
+               tipc_bearer_xmit(net, bearer_id, &xmitq, maddr);
+               return 0;
+       }
+       if (likely(in_own_node(net, dnode))) {
+               tipc_sk_rcv(net, list);
+               return 0;
+       }
+       return rc;
+}
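
A typical caller pattern, modeled on the socket-layer send loops later in
this patch (sketch; net, dnode and selector are assumed inputs):

    struct sk_buff_head list;
    int rc;

    skb_queue_head_init(&list);
    /* ... append message buffers destined for dnode ... */
    rc = tipc_node_xmit(net, &list, dnode, selector);
    if (rc == -ELINKCONG) {
            /* Chain was NOT consumed: wait for congestion relief
             * and retry, or purge the list before giving up.
             */
    }
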
+
+/* tipc_node_xmit_skb(): send single buffer to destination
+ * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
+ * messages, which will not be rejected.
+ * The only exception is datagram messages rerouted after secondary
+ * lookup, which are rare and safe to dispose of anyway.
+ * TODO: Return real return value, and let callers use
+ * tipc_wait_for_sendpkt() where applicable
+ */
+int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
+                      u32 selector)
+{
+       struct sk_buff_head head;
+       int rc;
+
+       skb_queue_head_init(&head);
+       __skb_queue_tail(&head, skb);
+       rc = tipc_node_xmit(net, &head, dnode, selector);
+       if (rc == -ELINKCONG)
+               kfree_skb(skb);
+       return 0;
+}
+
+/**
+ * tipc_node_check_state - check and if necessary update node state
+ * @n: node receiving the packet
+ * @skb: TIPC packet
+ * @bearer_id: identity of bearer delivering the packet
+ * @xmitq: queue for protocol messages generated by the check
+ * Returns true if state is ok, otherwise consumes buffer and returns false
+ */
+static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
+                                 int bearer_id, struct sk_buff_head *xmitq)
+{
+       struct tipc_msg *hdr = buf_msg(skb);
+       int usr = msg_user(hdr);
+       int mtyp = msg_type(hdr);
+       u16 oseqno = msg_seqno(hdr);
+       u16 iseqno = msg_seqno(msg_get_wrapped(hdr));
+       u16 exp_pkts = msg_msgcnt(hdr);
+       u16 rcv_nxt, syncpt, dlv_nxt;
+       int state = n->state;
+       struct tipc_link *l, *pl = NULL;
+       struct tipc_media_addr *maddr;
+       int i, pb_id;
+
+       l = n->links[bearer_id].link;
+       if (!l)
+               return false;
+       rcv_nxt = l->rcv_nxt;
+
+       if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
+               return true;
+
+       /* Find parallel link, if any */
+       for (i = 0; i < MAX_BEARERS; i++) {
+               if ((i != bearer_id) && n->links[i].link) {
+                       pl = n->links[i].link;
+                       break;
+               }
+       }
+
+       /* Update node accessibility if applicable */
+       if (state == SELF_UP_PEER_COMING) {
+               if (!tipc_link_is_up(l))
+                       return true;
+               if (!msg_peer_link_is_up(hdr))
+                       return true;
+               tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
+       }
+
+       if (state == SELF_DOWN_PEER_LEAVING) {
+               if (msg_peer_node_is_up(hdr))
+                       return false;
+               tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
+       }
+
+       /* Ignore duplicate packets */
+       if (less(oseqno, rcv_nxt))
+               return true;
+
+       /* Initiate or update failover mode if applicable */
+       if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
+               syncpt = oseqno + exp_pkts - 1;
+               if (pl && tipc_link_is_up(pl)) {
+                       pb_id = pl->bearer_id;
+                       __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
+                       tipc_skb_queue_splice_tail_init(pl->inputq, l->inputq);
+               }
+               /* If pkts arrive out of order, use lowest calculated syncpt */
+               if (less(syncpt, n->sync_point))
+                       n->sync_point = syncpt;
+       }
+
+       /* Open parallel link when tunnel link reaches synch point */
+       if ((n->state == NODE_FAILINGOVER) && !tipc_link_is_failingover(l)) {
+               if (!more(rcv_nxt, n->sync_point))
+                       return true;
+               tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
+               if (pl)
+                       tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
+               return true;
+       }
+
+       /* Initiate or update synch mode if applicable */
+       if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG)) {
+               syncpt = iseqno + exp_pkts - 1;
+               if (!tipc_link_is_up(l)) {
+                       tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+                       __tipc_node_link_up(n, bearer_id, xmitq);
+               }
+               if (n->state == SELF_UP_PEER_UP) {
+                       n->sync_point = syncpt;
+                       tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
+                       tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
+               }
+               if (less(syncpt, n->sync_point))
+                       n->sync_point = syncpt;
+       }
+
+       /* Open tunnel link when parallel link reaches synch point */
+       if ((n->state == NODE_SYNCHING) && tipc_link_is_synching(l)) {
+               if (pl)
+                       dlv_nxt = mod(pl->rcv_nxt - skb_queue_len(pl->inputq));
+               if (!pl || more(dlv_nxt, n->sync_point)) {
+                       tipc_link_fsm_evt(l, LINK_SYNCH_END_EVT);
+                       tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
+                       return true;
+               }
+               if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
+                       return true;
+               if (usr == LINK_PROTOCOL)
+                       return true;
+               return false;
+       }
+       return true;
+}
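
The less()/more() tests above compare 16-bit sequence numbers in a
wraparound-safe fashion. One common way to implement such a comparison
(illustrative only, not necessarily TIPC's exact definition):

    /* True if @left precedes @right in mod-2^16 sequence space */
    static inline bool seqno_less(u16 left, u16 right)
    {
            return ((s16)(left - right)) < 0;
    }
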
+
+/**
+ * tipc_rcv - process TIPC packets/messages arriving from off-node
+ * @net: the applicable net namespace
+ * @skb: TIPC packet
+ * @bearer: pointer to bearer message arrived on
+ *
+ * Invoked with no locks held. Bearer pointer must point to a valid bearer
+ * structure (i.e. cannot be NULL), but bearer can be inactive.
+ */
+void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
+{
+       struct sk_buff_head xmitq;
+       struct tipc_node *n;
+       struct tipc_msg *hdr = buf_msg(skb);
+       int usr = msg_user(hdr);
+       int bearer_id = b->identity;
+       struct tipc_link_entry *le;
+       int rc = 0;
+
+       __skb_queue_head_init(&xmitq);
+
+       /* Ensure message is well-formed */
+       if (unlikely(!tipc_msg_validate(skb)))
+               goto discard;
+
+       /* Handle arrival of a non-unicast link packet */
+       if (unlikely(msg_non_seq(hdr))) {
+               if (usr == LINK_CONFIG)
+                       tipc_disc_rcv(net, skb, b);
+               else
+                       tipc_bclink_rcv(net, skb);
+               return;
+       }
+
+       /* Locate neighboring node that sent packet */
+       n = tipc_node_find(net, msg_prevnode(hdr));
+       if (unlikely(!n))
+               goto discard;
+       le = &n->links[bearer_id];
+
+       tipc_node_lock(n);
+
+       /* Is reception permitted at the moment ? */
+       if (!tipc_node_filter_pkt(n, hdr))
+               goto unlock;
+
+       if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
+               tipc_bclink_sync_state(n, hdr);
+
+       /* Release acked broadcast packets */
+       if (unlikely(n->bclink.acked != msg_bcast_ack(hdr)))
+               tipc_bclink_acknowledge(n, msg_bcast_ack(hdr));
+
+       /* Check and if necessary update node state */
+       if (likely(tipc_node_check_state(n, skb, bearer_id, &xmitq))) {
+               rc = tipc_link_rcv(le->link, skb, &xmitq);
+               skb = NULL;
+       }
+unlock:
+       tipc_node_unlock(n);
+
+       if (unlikely(rc & TIPC_LINK_UP_EVT))
+               tipc_node_link_up(n, bearer_id, &xmitq);
+
+       if (unlikely(rc & TIPC_LINK_DOWN_EVT))
+               tipc_node_link_down(n, bearer_id, false);
+
+       if (unlikely(!skb_queue_empty(&n->bclink.namedq)))
+               tipc_named_rcv(net, &n->bclink.namedq);
+
+       if (!skb_queue_empty(&le->inputq))
+               tipc_sk_rcv(net, &le->inputq);
+
+       if (!skb_queue_empty(&xmitq))
+               tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
+
+       tipc_node_put(n);
+discard:
+       kfree_skb(skb);
+}
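
tipc_rcv() also shows the locking discipline used throughout this patch:
protocol messages generated while the node lock is held are only queued, and
are transmitted after the lock is released. Reduced to its skeleton (sketch):

    struct sk_buff_head xmitq;

    __skb_queue_head_init(&xmitq);
    tipc_node_lock(n);
    /* ... state changes may append protocol messages to &xmitq ... */
    tipc_node_unlock(n);
    tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);  /* lock released */
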
+
 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
        int err;
index 5a834cf142c8432fc4b5d65ab9ada044b342f438..344b3e7594fd0d59d8b83143181ffeac8c2df9a9 100644 (file)
 /* Out-of-range value for node signature */
 #define INVALID_NODE_SIG       0x10000
 
+#define INVALID_BEARER_ID -1
+
 /* Flags used to take different actions according to flag type
- * TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down
- * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down
  * TIPC_NOTIFY_NODE_DOWN: notify node is down
  * TIPC_NOTIFY_NODE_UP: notify node is up
  * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type
  */
 enum {
-       TIPC_MSG_EVT                    = 1,
-       TIPC_WAIT_PEER_LINKS_DOWN       = (1 << 1),
-       TIPC_WAIT_OWN_LINKS_DOWN        = (1 << 2),
        TIPC_NOTIFY_NODE_DOWN           = (1 << 3),
        TIPC_NOTIFY_NODE_UP             = (1 << 4),
        TIPC_WAKEUP_BCAST_USERS         = (1 << 5),
        TIPC_NOTIFY_LINK_UP             = (1 << 6),
        TIPC_NOTIFY_LINK_DOWN           = (1 << 7),
-       TIPC_NAMED_MSG_EVT              = (1 << 8),
        TIPC_BCAST_MSG_EVT              = (1 << 9),
        TIPC_BCAST_RESET                = (1 << 10)
 };
@@ -85,10 +81,17 @@ struct tipc_node_bclink {
        u32 deferred_size;
        struct sk_buff_head deferdq;
        struct sk_buff *reasm_buf;
-       int inputq_map;
+       struct sk_buff_head namedq;
        bool recv_permitted;
 };
 
+struct tipc_link_entry {
+       struct tipc_link *link;
+       u32 mtu;
+       struct sk_buff_head inputq;
+       struct tipc_media_addr maddr;
+};
+
 /**
  * struct tipc_node - TIPC node structure
  * @addr: network address of node
@@ -98,11 +101,12 @@ struct tipc_node_bclink {
  * @hash: links to adjacent nodes in unsorted hash chain
  * @inputq: pointer to input queue containing messages for msg event
  * @namedq: pointer to name table input queue with name table messages
- * @curr_link: the link holding the node lock, if any
- * @active_links: pointers to active links to node
- * @links: pointers to all links to node
+ * @active_links: bearer ids of active links, used as index into links[] array
+ * @links: array containing references to all links to node
  * @action_flags: bit mask of different types of node actions
  * @bclink: broadcast-related info
+ * @state: connectivity state vs peer node
+ * @sync_point: sequence number where synch/failover is finished
  * @list: links to adjacent nodes in sorted list of cluster's nodes
  * @working_links: number of working links to node (both active and standby)
  * @link_cnt: number of links to node
@@ -118,14 +122,13 @@ struct tipc_node {
        spinlock_t lock;
        struct net *net;
        struct hlist_node hash;
-       struct sk_buff_head *inputq;
-       struct sk_buff_head *namedq;
-       struct tipc_link *active_links[2];
-       u32 act_mtus[2];
-       struct tipc_link *links[MAX_BEARERS];
+       int active_links[2];
+       struct tipc_link_entry links[MAX_BEARERS];
        int action_flags;
        struct tipc_node_bclink bclink;
        struct list_head list;
+       int state;
+       u16 sync_point;
        int link_cnt;
        u16 working_links;
        u16 capabilities;
@@ -133,25 +136,32 @@ struct tipc_node {
        u32 link_id;
        struct list_head publ_list;
        struct list_head conn_sks;
+       unsigned long keepalive_intv;
+       struct timer_list timer;
        struct rcu_head rcu;
 };
 
 struct tipc_node *tipc_node_find(struct net *net, u32 addr);
 void tipc_node_put(struct tipc_node *node);
-struct tipc_node *tipc_node_create(struct net *net, u32 addr);
 void tipc_node_stop(struct net *net);
+void tipc_node_check_dest(struct net *net, u32 onode,
+                         struct tipc_bearer *bearer,
+                         u16 capabilities, u32 signature,
+                         struct tipc_media_addr *maddr,
+                         bool *respond, bool *dupl_addr);
+void tipc_node_delete_links(struct net *net, int bearer_id);
 void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
 void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
-void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
-void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
-int tipc_node_active_links(struct tipc_node *n_ptr);
-int tipc_node_is_up(struct tipc_node *n_ptr);
+bool tipc_node_is_up(struct tipc_node *n);
 int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 node,
                           char *linkname, size_t len);
 void tipc_node_unlock(struct tipc_node *node);
+int tipc_node_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
+                  int selector);
+int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
+                      u32 selector);
 int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
 void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
-
 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
 
 static inline void tipc_node_lock(struct tipc_node *node)
@@ -159,26 +169,30 @@ static inline void tipc_node_lock(struct tipc_node *node)
        spin_lock_bh(&node->lock);
 }
 
-static inline bool tipc_node_blocked(struct tipc_node *node)
+static inline struct tipc_link *node_active_link(struct tipc_node *n, int sel)
 {
-       return (node->action_flags & (TIPC_WAIT_PEER_LINKS_DOWN |
-               TIPC_NOTIFY_NODE_DOWN | TIPC_WAIT_OWN_LINKS_DOWN));
+       int bearer_id = n->active_links[sel & 1];
+
+       if (unlikely(bearer_id == INVALID_BEARER_ID))
+               return NULL;
+
+       return n->links[bearer_id].link;
 }
 
-static inline uint tipc_node_get_mtu(struct net *net, u32 addr, u32 selector)
+static inline unsigned int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
 {
-       struct tipc_node *node;
-       u32 mtu;
-
-       node = tipc_node_find(net, addr);
+       struct tipc_node *n;
+       int bearer_id;
+       unsigned int mtu = MAX_MSG_SIZE;
 
-       if (likely(node)) {
-               mtu = node->act_mtus[selector & 1];
-               tipc_node_put(node);
-       } else {
-               mtu = MAX_MSG_SIZE;
-       }
+       n = tipc_node_find(net, addr);
+       if (unlikely(!n))
+               return mtu;
 
+       bearer_id = n->active_links[sel & 1];
+       if (likely(bearer_id != INVALID_BEARER_ID))
+               mtu = n->links[bearer_id].mtu;
+       tipc_node_put(n);
        return mtu;
 }
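
Since __tipc_node_link_up() stores the per-link MTU with the tunnel-header
room already subtracted, callers can size user payloads directly against
this value; an unknown destination simply yields MAX_MSG_SIZE. Hypothetical
caller:

    unsigned int mtu = tipc_node_get_mtu(net, dnode, selector);
    /* fragment the user message into packets of at most mtu bytes */
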
 
index 3a7567f690f35458f0fe58e0cc1254a4ec8033fa..1060d52ff23eb14f7b2ed2f732beacf9a698502c 100644 (file)
@@ -248,6 +248,22 @@ static void tsk_advance_rx_queue(struct sock *sk)
        kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
 }
 
+/* tipc_sk_respond(): send response message back to sender
+ */
+static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
+{
+       u32 selector;
+       u32 dnode;
+       u32 onode = tipc_own_addr(sock_net(sk));
+
+       if (!tipc_msg_reverse(onode, &skb, err))
+               return;
+
+       dnode = msg_destnode(buf_msg(skb));
+       selector = msg_origport(buf_msg(skb));
+       tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
+}
+
 /**
  * tsk_rej_rx_queue - reject all buffers in socket receive queue
  *
@@ -256,13 +272,9 @@ static void tsk_advance_rx_queue(struct sock *sk)
 static void tsk_rej_rx_queue(struct sock *sk)
 {
        struct sk_buff *skb;
-       u32 dnode;
-       u32 own_node = tsk_own_node(tipc_sk(sk));
 
-       while ((skb = __skb_dequeue(&sk->sk_receive_queue))) {
-               if (tipc_msg_reverse(own_node, skb, &dnode, TIPC_ERR_NO_PORT))
-                       tipc_link_xmit_skb(sock_net(sk), skb, dnode, 0);
-       }
+       while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
+               tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
 }
 
 /* tsk_peer_msg - verify if message was sent by connected port's peer
@@ -441,9 +453,7 @@ static int tipc_release(struct socket *sock)
                                tsk->connected = 0;
                                tipc_node_remove_conn(net, dnode, tsk->portid);
                        }
-                       if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
-                                            TIPC_ERR_NO_PORT))
-                               tipc_link_xmit_skb(net, skb, dnode, 0);
+                       tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
                }
        }
 
@@ -456,7 +466,7 @@ static int tipc_release(struct socket *sock)
                                      tsk_own_node(tsk), tsk_peer_port(tsk),
                                      tsk->portid, TIPC_ERR_NO_PORT);
                if (skb)
-                       tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+                       tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
                tipc_node_remove_conn(net, dnode, tsk->portid);
        }
 
@@ -686,21 +696,22 @@ new_mtu:
 
        do {
                rc = tipc_bclink_xmit(net, pktchain);
-               if (likely(rc >= 0)) {
-                       rc = dsz;
-                       break;
+               if (likely(!rc))
+                       return dsz;
+
+               if (rc == -ELINKCONG) {
+                       tsk->link_cong = 1;
+                       rc = tipc_wait_for_sndmsg(sock, &timeo);
+                       if (!rc)
+                               continue;
                }
+               __skb_queue_purge(pktchain);
                if (rc == -EMSGSIZE) {
                        msg->msg_iter = save;
                        goto new_mtu;
                }
-               if (rc != -ELINKCONG)
-                       break;
-               tipc_sk(sk)->link_cong = 1;
-               rc = tipc_wait_for_sndmsg(sock, &timeo);
-               if (rc)
-                       __skb_queue_purge(pktchain);
-       } while (!rc);
+               break;
+       } while (1);
        return rc;
 }
 
@@ -763,35 +774,35 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
 /**
  * tipc_sk_proto_rcv - receive a connection mng protocol message
  * @tsk: receiving socket
- * @skb: pointer to message buffer. Set to NULL if buffer is consumed.
+ * @skb: pointer to message buffer.
  */
-static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff **skb)
+static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
 {
-       struct tipc_msg *msg = buf_msg(*skb);
+       struct sock *sk = &tsk->sk;
+       struct tipc_msg *hdr = buf_msg(skb);
+       int mtyp = msg_type(hdr);
        int conn_cong;
-       u32 dnode;
-       u32 own_node = tsk_own_node(tsk);
+
        /* Ignore if connection cannot be validated: */
-       if (!tsk_peer_msg(tsk, msg))
+       if (!tsk_peer_msg(tsk, hdr))
                goto exit;
 
        tsk->probing_state = TIPC_CONN_OK;
 
-       if (msg_type(msg) == CONN_ACK) {
+       if (mtyp == CONN_PROBE) {
+               msg_set_type(hdr, CONN_PROBE_REPLY);
+               tipc_sk_respond(sk, skb, TIPC_OK);
+               return;
+       } else if (mtyp == CONN_ACK) {
                conn_cong = tsk_conn_cong(tsk);
-               tsk->sent_unacked -= msg_msgcnt(msg);
+               tsk->sent_unacked -= msg_msgcnt(hdr);
                if (conn_cong)
-                       tsk->sk.sk_write_space(&tsk->sk);
-       } else if (msg_type(msg) == CONN_PROBE) {
-               if (tipc_msg_reverse(own_node, *skb, &dnode, TIPC_OK)) {
-                       msg_set_type(msg, CONN_PROBE_REPLY);
-                       return;
-               }
+                       sk->sk_write_space(sk);
+       } else if (mtyp != CONN_PROBE_REPLY) {
+               pr_warn("Received unknown CONN_PROTO msg\n");
        }
-       /* Do nothing if msg_type() == CONN_PROBE_REPLY */
 exit:
-       kfree_skb(*skb);
-       *skb = NULL;
+       kfree_skb(skb);
 }
 
 static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
@@ -924,24 +935,25 @@ new_mtu:
        do {
                skb = skb_peek(pktchain);
                TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
-               rc = tipc_link_xmit(net, pktchain, dnode, tsk->portid);
-               if (likely(rc >= 0)) {
+               rc = tipc_node_xmit(net, pktchain, dnode, tsk->portid);
+               if (likely(!rc)) {
                        if (sock->state != SS_READY)
                                sock->state = SS_CONNECTING;
-                       rc = dsz;
-                       break;
+                       return dsz;
                }
+               if (rc == -ELINKCONG) {
+                       tsk->link_cong = 1;
+                       rc = tipc_wait_for_sndmsg(sock, &timeo);
+                       if (!rc)
+                               continue;
+               }
+               __skb_queue_purge(pktchain);
                if (rc == -EMSGSIZE) {
                        m->msg_iter = save;
                        goto new_mtu;
                }
-               if (rc != -ELINKCONG)
-                       break;
-               tsk->link_cong = 1;
-               rc = tipc_wait_for_sndmsg(sock, &timeo);
-               if (rc)
-                       __skb_queue_purge(pktchain);
-       } while (!rc);
+               break;
+       } while (1);
 
        return rc;
 }
@@ -1043,15 +1055,16 @@ next:
                return rc;
        do {
                if (likely(!tsk_conn_cong(tsk))) {
-                       rc = tipc_link_xmit(net, pktchain, dnode, portid);
+                       rc = tipc_node_xmit(net, pktchain, dnode, portid);
                        if (likely(!rc)) {
                                tsk->sent_unacked++;
                                sent += send;
                                if (sent == dsz)
-                                       break;
+                                       return dsz;
                                goto next;
                        }
                        if (rc == -EMSGSIZE) {
+                               __skb_queue_purge(pktchain);
                                tsk->max_pkt = tipc_node_get_mtu(net, dnode,
                                                                 portid);
                                m->msg_iter = save;
@@ -1059,13 +1072,13 @@ next:
                        }
                        if (rc != -ELINKCONG)
                                break;
+
                        tsk->link_cong = 1;
                }
                rc = tipc_wait_for_sndpkt(sock, &timeo);
-               if (rc)
-                       __skb_queue_purge(pktchain);
        } while (!rc);
 
+       __skb_queue_purge(pktchain);
        return sent ? sent : rc;
 }
 
@@ -1221,7 +1234,7 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
                return;
        msg = buf_msg(skb);
        msg_set_msgcnt(msg, ack);
-       tipc_link_xmit_skb(net, skb, dnode, msg_link_selector(msg));
+       tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
 }
 
 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
@@ -1507,82 +1520,81 @@ static void tipc_data_ready(struct sock *sk)
  * @tsk: TIPC socket
  * @skb: pointer to message buffer. Set to NULL if buffer is consumed
  *
- * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise
+ * Returns true if everything ok, false otherwise
  */
-static int filter_connect(struct tipc_sock *tsk, struct sk_buff **skb)
+static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
 {
        struct sock *sk = &tsk->sk;
        struct net *net = sock_net(sk);
        struct socket *sock = sk->sk_socket;
-       struct tipc_msg *msg = buf_msg(*skb);
-       int retval = -TIPC_ERR_NO_PORT;
+       struct tipc_msg *hdr = buf_msg(skb);
 
-       if (msg_mcast(msg))
-               return retval;
+       if (unlikely(msg_mcast(hdr)))
+               return false;
 
        switch ((int)sock->state) {
        case SS_CONNECTED:
+
                /* Accept only connection-based messages sent by peer */
-               if (tsk_peer_msg(tsk, msg)) {
-                       if (unlikely(msg_errcode(msg))) {
-                               sock->state = SS_DISCONNECTING;
-                               tsk->connected = 0;
-                               /* let timer expire on it's own */
-                               tipc_node_remove_conn(net, tsk_peer_node(tsk),
-                                                     tsk->portid);
-                       }
-                       retval = TIPC_OK;
+               if (unlikely(!tsk_peer_msg(tsk, hdr)))
+                       return false;
+
+               if (unlikely(msg_errcode(hdr))) {
+                       sock->state = SS_DISCONNECTING;
+                       tsk->connected = 0;
+                       /* Let timer expire on its own */
+                       tipc_node_remove_conn(net, tsk_peer_node(tsk),
+                                             tsk->portid);
                }
-               break;
+               return true;
+
        case SS_CONNECTING:
-               /* Accept only ACK or NACK message */
 
-               if (unlikely(!msg_connected(msg)))
-                       break;
+               /* Accept only ACK or NACK message */
+               if (unlikely(!msg_connected(hdr)))
+                       return false;
 
-               if (unlikely(msg_errcode(msg))) {
+               if (unlikely(msg_errcode(hdr))) {
                        sock->state = SS_DISCONNECTING;
                        sk->sk_err = ECONNREFUSED;
-                       retval = TIPC_OK;
-                       break;
+                       return true;
                }
 
-               if (unlikely(msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)) {
+               if (unlikely(!msg_isdata(hdr))) {
                        sock->state = SS_DISCONNECTING;
                        sk->sk_err = EINVAL;
-                       retval = TIPC_OK;
-                       break;
+                       return true;
                }
 
-               tipc_sk_finish_conn(tsk, msg_origport(msg), msg_orignode(msg));
-               msg_set_importance(&tsk->phdr, msg_importance(msg));
+               tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
+               msg_set_importance(&tsk->phdr, msg_importance(hdr));
                sock->state = SS_CONNECTED;
 
-               /* If an incoming message is an 'ACK-', it should be
-                * discarded here because it doesn't contain useful
-                * data. In addition, we should try to wake up
-                * connect() routine if sleeping.
-                */
-               if (msg_data_sz(msg) == 0) {
-                       kfree_skb(*skb);
-                       *skb = NULL;
-                       if (waitqueue_active(sk_sleep(sk)))
-                               wake_up_interruptible(sk_sleep(sk));
-               }
-               retval = TIPC_OK;
-               break;
+               /* If 'ACK+' message, add to socket receive queue */
+               if (msg_data_sz(hdr))
+                       return true;
+
+               /* If empty 'ACK-' message, wake up sleeping connect() */
+               if (waitqueue_active(sk_sleep(sk)))
+                       wake_up_interruptible(sk_sleep(sk));
+
+               /* 'ACK-' message is neither accepted nor rejected: */
+               msg_set_dest_droppable(hdr, 1);
+               return false;
+
        case SS_LISTENING:
        case SS_UNCONNECTED:
+
                /* Accept only SYN message */
-               if (!msg_connected(msg) && !(msg_errcode(msg)))
-                       retval = TIPC_OK;
+               if (!msg_connected(hdr) && !(msg_errcode(hdr)))
+                       return true;
                break;
        case SS_DISCONNECTING:
                break;
        default:
                pr_err("Unknown socket state %u\n", sock->state);
        }
-       return retval;
+       return false;
 }
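
Worth noting in the SS_CONNECTING branch above: a data-carrying 'ACK+' is accepted into the receive queue, while an empty 'ACK-' only wakes a sleeping connect() and is then reported as not accepted but marked destination-droppable, so the shared reject path frees it instead of bouncing it back to the peer. A condensed sketch of that three-way outcome (hypothetical ex_* names; state and errno bookkeeping elided):

    #include <stdbool.h>

    struct ex_msg { int connected, errcode, data_sz, dest_droppable; };
    struct ex_sock;                             /* opaque in this sketch */
    static void ex_finish_conn(struct ex_sock *s, struct ex_msg *m);
    static void ex_wake_connect(struct ex_sock *s);

    static bool ex_filter_connecting(struct ex_sock *s, struct ex_msg *m)
    {
        if (!m->connected)
            return false;               /* not an ACK/NACK: reject        */
        if (m->errcode)
            return true;                /* NACK: enqueue; ECONNREFUSED    */
        ex_finish_conn(s, m);           /* record peer, enter CONNECTED   */
        if (m->data_sz)
            return true;                /* 'ACK+': enqueue its data       */
        ex_wake_connect(s);             /* 'ACK-': wake connect()...      */
        m->dest_droppable = 1;          /* ...and have reject drop, not   */
        return false;                   /* bounce, this empty message     */
    }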
 
 /**
@@ -1617,61 +1629,70 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
 /**
  * filter_rcv - validate incoming message
  * @sk: socket
- * @skb: pointer to message. Set to NULL if buffer is consumed.
+ * @skb: pointer to message.
  *
  * Enqueues message on receive queue if acceptable; optionally handles
  * disconnect indication for a connected socket.
  *
  * Called with socket lock already taken
  *
- * Returns 0 (TIPC_OK) if message was ok, -TIPC error code if rejected
+ * Returns true if message was added to socket receive queue, otherwise false
  */
-static int filter_rcv(struct sock *sk, struct sk_buff **skb)
+static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
 {
        struct socket *sock = sk->sk_socket;
        struct tipc_sock *tsk = tipc_sk(sk);
-       struct tipc_msg *msg = buf_msg(*skb);
-       unsigned int limit = rcvbuf_limit(sk, *skb);
-       int rc = TIPC_OK;
+       struct tipc_msg *hdr = buf_msg(skb);
+       unsigned int limit = rcvbuf_limit(sk, skb);
+       int err = TIPC_OK;
+       int usr = msg_user(hdr);
 
-       if (unlikely(msg_user(msg) == CONN_MANAGER)) {
+       if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
                tipc_sk_proto_rcv(tsk, skb);
-               return TIPC_OK;
+               return false;
        }
 
-       if (unlikely(msg_user(msg) == SOCK_WAKEUP)) {
-               kfree_skb(*skb);
+       if (unlikely(usr == SOCK_WAKEUP)) {
+               kfree_skb(skb);
                tsk->link_cong = 0;
                sk->sk_write_space(sk);
-               *skb = NULL;
-               return TIPC_OK;
+               return false;
        }
 
-       /* Reject message if it is wrong sort of message for socket */
-       if (msg_type(msg) > TIPC_DIRECT_MSG)
-               return -TIPC_ERR_NO_PORT;
+       /* Drop if illegal message type */
+       if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) {
+               kfree_skb(skb);
+               return false;
+       }
 
-       if (sock->state == SS_READY) {
-               if (msg_connected(msg))
-                       return -TIPC_ERR_NO_PORT;
-       } else {
-               rc = filter_connect(tsk, skb);
-               if (rc != TIPC_OK || !*skb)
-                       return rc;
+       /* Reject if wrong message type for current socket state */
+       if (unlikely(sock->state == SS_READY)) {
+               if (msg_connected(hdr)) {
+                       err = TIPC_ERR_NO_PORT;
+                       goto reject;
+               }
+       } else if (unlikely(!filter_connect(tsk, skb))) {
+               err = TIPC_ERR_NO_PORT;
+               goto reject;
        }
 
        /* Reject message if there isn't room to queue it */
-       if (sk_rmem_alloc_get(sk) + (*skb)->truesize >= limit)
-               return -TIPC_ERR_OVERLOAD;
+       if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) {
+               err = TIPC_ERR_OVERLOAD;
+               goto reject;
+       }
 
        /* Enqueue message */
-       TIPC_SKB_CB(*skb)->handle = NULL;
-       __skb_queue_tail(&sk->sk_receive_queue, *skb);
-       skb_set_owner_r(*skb, sk);
+       TIPC_SKB_CB(skb)->handle = NULL;
+       __skb_queue_tail(&sk->sk_receive_queue, skb);
+       skb_set_owner_r(skb, sk);
 
        sk->sk_data_ready(sk);
-       *skb = NULL;
-       return TIPC_OK;
+       return true;
+
+reject:
+       tipc_sk_respond(sk, skb, err);
+       return false;
 }
 
 /**
@@ -1685,22 +1706,10 @@ static int filter_rcv(struct sock *sk, struct sk_buff **skb)
  */
 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
-       int err;
-       atomic_t *dcnt;
-       u32 dnode;
-       struct tipc_sock *tsk = tipc_sk(sk);
-       struct net *net = sock_net(sk);
-       uint truesize = skb->truesize;
+       unsigned int truesize = skb->truesize;
 
-       err = filter_rcv(sk, &skb);
-       if (likely(!skb)) {
-               dcnt = &tsk->dupl_rcvcnt;
-               if (atomic_read(dcnt) < TIPC_CONN_OVERLOAD_LIMIT)
-                       atomic_add(truesize, dcnt);
-               return 0;
-       }
-       if (!err || tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode, -err))
-               tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+       if (likely(filter_rcv(sk, skb)))
+               atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
        return 0;
 }
 
@@ -1710,45 +1719,43 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
  * @inputq: list of incoming buffers with potentially different destinations
  * @sk: socket where the buffers should be enqueued
  * @dport: port number for the socket
- * @_skb: returned buffer to be forwarded or rejected, if applicable
  *
  * Caller must hold socket lock
- *
- * Returns TIPC_OK if all buffers enqueued, otherwise -TIPC_ERR_OVERLOAD
- * or -TIPC_ERR_NO_PORT
  */
-static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
-                          u32 dport, struct sk_buff **_skb)
+static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
+                           u32 dport)
 {
        unsigned int lim;
        atomic_t *dcnt;
-       int err;
        struct sk_buff *skb;
        unsigned long time_limit = jiffies + 2;
 
        while (skb_queue_len(inputq)) {
                if (unlikely(time_after_eq(jiffies, time_limit)))
-                       return TIPC_OK;
+                       return;
+
                skb = tipc_skb_dequeue(inputq, dport);
                if (unlikely(!skb))
-                       return TIPC_OK;
+                       return;
+
+               /* Add message directly to receive queue if possible */
                if (!sock_owned_by_user(sk)) {
-                       err = filter_rcv(sk, &skb);
-                       if (likely(!skb))
-                               continue;
-                       *_skb = skb;
-                       return err;
+                       filter_rcv(sk, skb);
+                       continue;
                }
+
+               /* Try backlog, compensating for double-counted bytes */
                dcnt = &tipc_sk(sk)->dupl_rcvcnt;
                if (sk->sk_backlog.len)
                        atomic_set(dcnt, 0);
                lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
                if (likely(!sk_add_backlog(sk, skb, lim)))
                        continue;
-               *_skb = skb;
-               return -TIPC_ERR_OVERLOAD;
+
+               /* Overload => reject message back to sender */
+               tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD);
+               break;
        }
-       return TIPC_OK;
 }
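
The "double-counted bytes" above come from socket backlog accounting: while the owner thread drains its backlog, buffers already moved to the receive queue are charged to sk_rmem_alloc, yet sk_backlog.len is only trued up once the whole backlog has been processed, so the same truesize is momentarily counted in both places. tipc_backlog_rcv() accumulates those bytes in dupl_rcvcnt, and tipc_sk_enqueue() widens the limit handed to sk_add_backlog() by the same amount:

    /* Worked example (illustrative sizes):
     *   rcvbuf_limit(sk, skb)   = 64 KiB
     *   dupl_rcvcnt             = 16 KiB   (charged to both sk_rmem_alloc
     *                                       and sk_backlog.len)
     *   lim = 64 KiB + 16 KiB   = 80 KiB
     * so new arrivals are not rejected on account of bytes that are
     * counted twice rather than queued twice.
     */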
 
 /**
@@ -1756,49 +1763,46 @@ static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
  * @inputq: buffer list containing the buffers
  * Consumes all buffers in list until inputq is empty
  * Note: may be called in multiple threads referring to the same queue
- * Returns 0 if last buffer was accepted, otherwise -EHOSTUNREACH
- * Only node local calls check the return value, sending single-buffer queues
  */
-int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
+void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
 {
        u32 dnode, dport = 0;
        int err;
-       struct sk_buff *skb;
        struct tipc_sock *tsk;
-       struct tipc_net *tn;
        struct sock *sk;
+       struct sk_buff *skb;
 
        while (skb_queue_len(inputq)) {
-               err = -TIPC_ERR_NO_PORT;
-               skb = NULL;
                dport = tipc_skb_peek_port(inputq, dport);
                tsk = tipc_sk_lookup(net, dport);
+
                if (likely(tsk)) {
                        sk = &tsk->sk;
                        if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
-                               err = tipc_sk_enqueue(inputq, sk, dport, &skb);
+                               tipc_sk_enqueue(inputq, sk, dport);
                                spin_unlock_bh(&sk->sk_lock.slock);
-                               dport = 0;
                        }
                        sock_put(sk);
-               } else {
-                       skb = tipc_skb_dequeue(inputq, dport);
-               }
-               if (likely(!skb))
                        continue;
-               if (tipc_msg_lookup_dest(net, skb, &dnode, &err))
-                       goto xmit;
-               if (!err) {
-                       dnode = msg_destnode(buf_msg(skb));
-                       goto xmit;
                }
-               tn = net_generic(net, tipc_net_id);
-               if (!tipc_msg_reverse(tn->own_addr, skb, &dnode, -err))
+
+               /* No destination socket => dequeue skb if still there */
+               skb = tipc_skb_dequeue(inputq, dport);
+               if (!skb)
+                       return;
+
+               /* Try secondary lookup if unresolved named message */
+               err = TIPC_ERR_NO_PORT;
+               if (tipc_msg_lookup_dest(net, skb, &err))
+                       goto xmit;
+
+               /* Prepare for message rejection */
+               if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
                        continue;
 xmit:
-               tipc_link_xmit_skb(net, skb, dnode, dport);
+               dnode = msg_destnode(buf_msg(skb));
+               tipc_node_xmit_skb(net, skb, dnode, dport);
        }
-       return err ? -EHOSTUNREACH : 0;
 }
 
 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
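
The receive loop in tipc_sk_rcv() above resolves each buffer through a fixed fallback chain: deliver to the looked-up socket; failing that, retry the name-table lookup for an unresolved named message; failing that, reverse the message into a rejection, dropping it only when it cannot be reversed. Sketched with hypothetical ex_* helpers (the lock/trylock handling of the real loop is elided):

    struct ex_net;
    struct ex_buf;
    static int  ex_deliver_to_socket(struct ex_net *n, struct ex_buf *b);
    static int  ex_secondary_name_lookup(struct ex_net *n, struct ex_buf *b);
    static int  ex_reverse_for_reject(struct ex_net *n, struct ex_buf *b);
    static void ex_forward(struct ex_net *n, struct ex_buf *b);

    static void ex_dispatch(struct ex_net *net, struct ex_buf *b)
    {
        if (ex_deliver_to_socket(net, b))
            return;                      /* normal case                  */
        if (ex_secondary_name_lookup(net, b))
            goto xmit;                   /* named msg re-resolved        */
        if (!ex_reverse_for_reject(net, b))
            return;                      /* not reversible: b was freed  */
    xmit:
        ex_forward(net, b);              /* send on toward destination   */
    }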
@@ -2067,7 +2071,10 @@ static int tipc_shutdown(struct socket *sock, int how)
        struct net *net = sock_net(sk);
        struct tipc_sock *tsk = tipc_sk(sk);
        struct sk_buff *skb;
-       u32 dnode;
+       u32 dnode = tsk_peer_node(tsk);
+       u32 dport = tsk_peer_port(tsk);
+       u32 onode = tipc_own_addr(net);
+       u32 oport = tsk->portid;
        int res;
 
        if (how != SHUT_RDWR)
@@ -2080,6 +2087,8 @@ static int tipc_shutdown(struct socket *sock, int how)
        case SS_CONNECTED:
 
 restart:
+               dnode = tsk_peer_node(tsk);
+
                /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
                skb = __skb_dequeue(&sk->sk_receive_queue);
                if (skb) {
@@ -2087,19 +2096,13 @@ restart:
                                kfree_skb(skb);
                                goto restart;
                        }
-                       if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
-                                            TIPC_CONN_SHUTDOWN))
-                               tipc_link_xmit_skb(net, skb, dnode,
-                                                  tsk->portid);
+                       tipc_sk_respond(sk, skb, TIPC_CONN_SHUTDOWN);
                } else {
-                       dnode = tsk_peer_node(tsk);
-
                        skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
                                              TIPC_CONN_MSG, SHORT_H_SIZE,
-                                             0, dnode, tsk_own_node(tsk),
-                                             tsk_peer_port(tsk),
-                                             tsk->portid, TIPC_CONN_SHUTDOWN);
-                       tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+                                             0, dnode, onode, dport, oport,
+                                             TIPC_CONN_SHUTDOWN);
+                       tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
                }
                tsk->connected = 0;
                sock->state = SS_DISCONNECTING;
@@ -2161,7 +2164,7 @@ static void tipc_sk_timeout(unsigned long data)
        }
        bh_unlock_sock(sk);
        if (skb)
-               tipc_link_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
+               tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
 exit:
        sock_put(sk);
 }
index bf6551389522dfda37fb0eff4bb5d15221bb2b91..4241f22069dc93270f9760c2f30ec9d36ede84d7 100644 (file)
@@ -44,7 +44,7 @@
                                  SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
 int tipc_socket_init(void);
 void tipc_socket_stop(void);
-int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
+void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
                       struct sk_buff_head *inputq);
 void tipc_sk_reinit(struct net *net);
index 66deebc66aa10820880bd51839bd2ae379eaeb86..c170d3138953a2361df5439aeffadd29afa52ad9 100644 (file)
@@ -194,7 +194,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
                        .saddr = src->ipv6,
                        .flowi6_proto = IPPROTO_UDP
                };
-               err = ipv6_stub->ipv6_dst_lookup(ub->ubsock->sk, &ndst, &fl6);
+               err = ipv6_stub->ipv6_dst_lookup(net, ub->ubsock->sk, &ndst,
+                                                &fl6);
                if (err)
                        goto tx_error;
                ttl = ip6_dst_hoplimit(ndst);
index bd16c6c7e1e7660b8ce183c72b16eb65aa51af5b..0cebf1fc37a2743ba096747056fab6c927922b23 100644 (file)
@@ -2048,7 +2048,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
                xfrm_audit_policy_delete(xp, 1, true);
        } else {
                // reset the timers here?
-               WARN(1, "Dont know what to do with soft policy expire\n");
+               WARN(1, "Don't know what to do with soft policy expire\n");
        }
        km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
 
index bdf1c1607b808e9ac0d7ada164a25694c56b6965..c77c872fe8ee477c7cfabf5fd539824fc676b173 100644 (file)
@@ -60,4 +60,29 @@ static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flag
 static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
        (void *) BPF_FUNC_l4_csum_replace;
 
+#if defined(__x86_64__)
+
+#define PT_REGS_PARM1(x) ((x)->di)
+#define PT_REGS_PARM2(x) ((x)->si)
+#define PT_REGS_PARM3(x) ((x)->dx)
+#define PT_REGS_PARM4(x) ((x)->cx)
+#define PT_REGS_PARM5(x) ((x)->r8)
+#define PT_REGS_RET(x) ((x)->sp)
+#define PT_REGS_FP(x) ((x)->bp)
+#define PT_REGS_RC(x) ((x)->ax)
+#define PT_REGS_SP(x) ((x)->sp)
+
+#elif defined(__s390x__)
+
+#define PT_REGS_PARM1(x) ((x)->gprs[2])
+#define PT_REGS_PARM2(x) ((x)->gprs[3])
+#define PT_REGS_PARM3(x) ((x)->gprs[4])
+#define PT_REGS_PARM4(x) ((x)->gprs[5])
+#define PT_REGS_PARM5(x) ((x)->gprs[6])
+#define PT_REGS_RET(x) ((x)->gprs[14])
+#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
+#define PT_REGS_RC(x) ((x)->gprs[2])
+#define PT_REGS_SP(x) ((x)->gprs[15])
+
+#endif
 #endif
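
These macros hide the per-architecture calling convention (System V argument registers rdi/rsi/rdx/rcx/r8 on x86_64, general registers %r2-%r6 on s390x), which is what lets the tracing samples below shed their raw ctx->di/ctx->si/ctx->dx accesses. A minimal handler in the style of those samples (illustrative attach point; SEC() and bpf_probe_read() as provided by the samples' helper headers):

    SEC("kprobe/kmem_cache_free")              /* illustrative probe      */
    int ex_prog(struct pt_regs *ctx)
    {
        long arg2 = PT_REGS_PARM2(ctx);        /* 2nd argument, portably  */
        long caller_ip = 0;

        /* same non-portable return-address read tracex2 uses below */
        bpf_probe_read(&caller_ip, sizeof(caller_ip),
                       (void *)PT_REGS_RET(ctx));
        /* ...arg2/caller_ip would be used as map keys, etc... */
        return 0;
    }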
index 693605997abcbb9eb7d069d2ec57b8046579263b..ee0f110c9c543b54fd8593607d161598fb29e472 100644 (file)
@@ -822,6 +822,65 @@ static struct bpf_test tests[] = {
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
+       {
+               "PTR_TO_STACK store/load",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+       },
+       {
+               "PTR_TO_STACK store/load - bad alignment on off",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "misaligned access off -6 size 8",
+       },
+       {
+               "PTR_TO_STACK store/load - bad alignment on reg",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "misaligned access off -2 size 8",
+       },
+       {
+               "PTR_TO_STACK store/load - out of bounds low",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid stack off=-79992 size=8",
+       },
+       {
+               "PTR_TO_STACK store/load - out of bounds high",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid stack off=0 size=8",
+       },
 };
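
These five cases pin down the verifier's stack rules: the effective address is the register's offset plus the instruction's offset, it must be aligned to the access size (8 bytes for BPF_DW), and it must stay within the 512-byte BPF stack strictly below the frame pointer:

    /*  fp - 10 + 2 = fp - 8          8-byte aligned      -> ACCEPT
     *  fp -  8 + 2 = fp - 6          "off -6 size 8"     -> REJECT
     *  fp - 10 + 8 = fp - 2          "off -2 size 8"     -> REJECT
     *  fp - 80000 + 8 = fp - 79992   beyond 512 bytes    -> REJECT
     *  fp -  8 + 8 = fp + 0          not below fp        -> REJECT
     */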
 
 static int probe_filter_length(struct bpf_insn *fp)
index 31620463701a55edc0afc54d45a5cabaef890410..3f450a8fa1f3487b2cf2d9b261f63dc3d25f6599 100644 (file)
@@ -29,7 +29,7 @@ int bpf_prog1(struct pt_regs *ctx)
        int len;
 
        /* non-portable! works for the given kernel only */
-       skb = (struct sk_buff *) ctx->di;
+       skb = (struct sk_buff *) PT_REGS_PARM1(ctx);
 
        dev = _(skb->dev);
 
index dc50f4f2943f937d5c389e5384d39a0153d3f273..b32367cfbff4aff3020bb9c36c8faf6c981dd0f8 100644 (file)
@@ -27,10 +27,10 @@ int bpf_prog2(struct pt_regs *ctx)
        long init_val = 1;
        long *value;
 
-       /* x64 specific: read ip of kfree_skb caller.
+       /* x64/s390x specific: read ip of kfree_skb caller.
         * non-portable version of __builtin_return_address(0)
         */
-       bpf_probe_read(&loc, sizeof(loc), (void *)ctx->sp);
+       bpf_probe_read(&loc, sizeof(loc), (void *)PT_REGS_RET(ctx));
 
        value = bpf_map_lookup_elem(&my_map, &loc);
        if (value)
@@ -79,7 +79,7 @@ struct bpf_map_def SEC("maps") my_hist_map = {
 SEC("kprobe/sys_write")
 int bpf_prog3(struct pt_regs *ctx)
 {
-       long write_size = ctx->dx; /* arg3 */
+       long write_size = PT_REGS_PARM3(ctx);
        long init_val = 1;
        long *value;
        struct hist_key key = {};
index 255ff27923666a844e3cb2881e25661278c7c1d6..bf337fbb09472cbe32bfbaff2d4313b7cafb58c6 100644 (file)
@@ -23,7 +23,7 @@ struct bpf_map_def SEC("maps") my_map = {
 SEC("kprobe/blk_mq_start_request")
 int bpf_prog1(struct pt_regs *ctx)
 {
-       long rq = ctx->di;
+       long rq = PT_REGS_PARM1(ctx);
        u64 val = bpf_ktime_get_ns();
 
        bpf_map_update_elem(&my_map, &rq, &val, BPF_ANY);
@@ -51,7 +51,7 @@ struct bpf_map_def SEC("maps") lat_map = {
 SEC("kprobe/blk_update_request")
 int bpf_prog2(struct pt_regs *ctx)
 {
-       long rq = ctx->di;
+       long rq = PT_REGS_PARM1(ctx);
        u64 *value, l, base;
        u32 index;
 
index 126b80512228aa6493c8332e75933ab5852acab1..ac4671420cf15949c4087b8c2847c69978646650 100644 (file)
@@ -27,7 +27,7 @@ struct bpf_map_def SEC("maps") my_map = {
 SEC("kprobe/kmem_cache_free")
 int bpf_prog1(struct pt_regs *ctx)
 {
-       long ptr = ctx->si;
+       long ptr = PT_REGS_PARM2(ctx);
 
        bpf_map_delete_elem(&my_map, &ptr);
        return 0;
@@ -36,11 +36,11 @@ int bpf_prog1(struct pt_regs *ctx)
 SEC("kretprobe/kmem_cache_alloc_node")
 int bpf_prog2(struct pt_regs *ctx)
 {
-       long ptr = ctx->ax;
+       long ptr = PT_REGS_RC(ctx);
        long ip = 0;
 
        /* get ip address of kmem_cache_alloc_node() caller */
-       bpf_probe_read(&ip, sizeof(ip), (void *)(ctx->bp + sizeof(ip)));
+       bpf_probe_read(&ip, sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip)));
 
        struct pair v = {
                .val = bpf_ktime_get_ns(),
index b71fe07a7a7a4820a77e7cb0d5a112febdbaa49c..b3f4295bf288536c1f9ae7500b542a77cf8aaec1 100644 (file)
@@ -24,7 +24,7 @@ int bpf_prog1(struct pt_regs *ctx)
 {
        struct seccomp_data sd = {};
 
-       bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+       bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
 
        /* dispatch into next BPF program depending on syscall number */
        bpf_tail_call(ctx, &progs, sd.nr);
@@ -42,7 +42,7 @@ PROG(__NR_write)(struct pt_regs *ctx)
 {
        struct seccomp_data sd = {};
 
-       bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+       bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
        if (sd.args[2] == 512) {
                char fmt[] = "write(fd=%d, buf=%p, size=%d)\n";
                bpf_trace_printk(fmt, sizeof(fmt),
@@ -55,7 +55,7 @@ PROG(__NR_read)(struct pt_regs *ctx)
 {
        struct seccomp_data sd = {};
 
-       bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+       bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
        if (sd.args[2] > 128 && sd.args[2] <= 1024) {
                char fmt[] = "read(fd=%d, buf=%p, size=%d)\n";
                bpf_trace_printk(fmt, sizeof(fmt),
index 8965d1bb881194685d1af344cbdfd2868c2aa9fd..125d6402f64f8555ec85b42f4c39e3af0f46ef3b 100644 (file)
  *
  *      For __dynamic_array(int, foo, bar) use __get_dynamic_array(foo)
  *            Use __get_dynamic_array_len(foo) to get the length of the array
- *            saved.
+ *            saved. Note, __get_dynamic_array_len() returns the total allocated
+ *            length of the dynamic array; __print_array() expects the second
+ *            parameter to be the number of elements. To get that, the array length
+ *            needs to be divided by the element size.
  *
  *      For __string(foo, bar) use __get_str(foo)
  *
@@ -288,7 +291,7 @@ TRACE_EVENT(foo_bar,
  *    This prints out the array that is defined by __array in a nice format.
  */
                  __print_array(__get_dynamic_array(list),
-                               __get_dynamic_array_len(list),
+                               __get_dynamic_array_len(list) / sizeof(int),
                                sizeof(int)),
                  __get_str(str), __get_bitmask(cpus))
 );
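
For instance, an event declaring __dynamic_array(int, list, 5) records 5 * sizeof(int) = 20 bytes, so:

    /*  __get_dynamic_array_len(list)               = 20  (bytes)
     *  __get_dynamic_array_len(list) / sizeof(int) =  5  (elements)
     */

handing __print_array() the element count it expects rather than the byte length.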
index 90e1edc8dd42fbfba3c31d03214f99d4e3c361b4..d5c8e9a3a73cfc1450b7c698e10aa9848a01dad2 100755 (executable)
@@ -2599,7 +2599,7 @@ sub process {
 # if LONG_LINE is ignored, the other 2 types are also ignored
 #
 
-               if ($length > $max_line_length) {
+               if ($line =~ /^\+/ && $length > $max_line_length) {
                        my $msg_type = "LONG_LINE";
 
                        # Check the allowed long line types first
index e72548b5897ec237dd7463374871538c81a84fd7..d33437007ad229313a5ef483826e427a74350876 100644 (file)
@@ -1181,9 +1181,11 @@ void __key_link_end(struct key *keyring,
        if (index_key->type == &key_type_keyring)
                up_write(&keyring_serialise_link_sem);
 
-       if (edit && !edit->dead_leaf) {
-               key_payload_reserve(keyring,
-                                   keyring->datalen - KEYQUOTA_LINK_BYTES);
+       if (edit) {
+               if (!edit->dead_leaf) {
+                       key_payload_reserve(keyring,
+                               keyring->datalen - KEYQUOTA_LINK_BYTES);
+               }
                assoc_array_cancel_edit(edit);
        }
        up_write(&keyring->sem);
index d126c03361aef87fb1239df1e42f547c5ce36aa9..75888dd38a7fc87acf5fcd7789570ad353dce482 100644 (file)
@@ -85,7 +85,7 @@ static DECLARE_RWSEM(snd_pcm_link_rwsem);
 void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
 {
        if (substream->pcm->nonatomic) {
-               down_read(&snd_pcm_link_rwsem);
+               down_read_nested(&snd_pcm_link_rwsem, SINGLE_DEPTH_NESTING);
                mutex_lock(&substream->self_group.mutex);
        } else {
                read_lock(&snd_pcm_link_rwlock);
index 2682e7e3e5c98511e8bd26fddf452427f1e83a6a..c670db4eee70d42c91db2904354bf391267c22ef 100644 (file)
@@ -248,6 +248,8 @@ efw_probe(struct fw_unit *unit,
        err = get_hardware_info(efw);
        if (err < 0)
                goto error;
+       if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2)
+               efw->is_af2 = true;
        if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9)
                efw->is_af9 = true;
 
index 4f0201a95222a2502ec438199fe9e78cad33b647..c33252b7bc847501c4fc2c7b2c18d07a9ec18898 100644 (file)
@@ -70,6 +70,7 @@ struct snd_efw {
        bool resp_addr_changable;
 
        /* for quirks */
+       bool is_af2;
        bool is_af9;
        u32 firmware_version;
 
index c55db1bddc80a0ceab4997279643f73840943cef..a0762dd6231e6080eb9ab86343c413db0bd64492 100644 (file)
@@ -172,6 +172,9 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw)
        efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT;
        /* Fireworks reset dbc at bus reset. */
        efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK;
+       /* AudioFire2 starts packets with non-zero dbc. */
+       if (efw->is_af2)
+               efw->tx_stream.flags |= CIP_SKIP_INIT_DBC_CHECK;
        /* AudioFire9 always reports wrong dbs. */
        if (efw->is_af9)
                efw->tx_stream.flags |= CIP_WRONG_DBS;
index 442500e06b7c7b66be05ec254a68e58d009089fe..5676b849379d43468267e8d12fc7367096a2ae4c 100644 (file)
@@ -56,8 +56,11 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
                enable ? "enable" : "disable");
 
        if (enable) {
-               if (!bus->i915_power_refcount++)
+               if (!bus->i915_power_refcount++) {
                        acomp->ops->get_power(acomp->dev);
+                       snd_hdac_set_codec_wakeup(bus, true);
+                       snd_hdac_set_codec_wakeup(bus, false);
+               }
        } else {
                WARN_ON(!bus->i915_power_refcount);
                if (!--bus->i915_power_refcount)
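
The "!refcount++" / "!--refcount" tests confine the expensive calls to the 0 -> 1 and 1 -> 0 transitions, so the codec-wakeup toggle added above runs once per power-up rather than once per caller. The bare shape of the idiom (generic sketch, not from this file):

    static void ex_power_on(void);
    static void ex_power_off(void);
    static int ex_refs;

    static void ex_get(void)
    {
        if (!ex_refs++)          /* 0 -> 1: first user powers up  */
            ex_power_on();
    }

    static void ex_put(void)
    {
        if (!--ex_refs)          /* 1 -> 0: last user powers down */
            ex_power_off();
    }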
index ac0db1679f098ee4ec08c6770fe1f1374c8bf431..b077bb644434734004b7afca348bbac4643478a5 100644 (file)
@@ -5175,7 +5175,7 @@ static int alt_playback_pcm_open(struct hda_pcm_stream *hinfo,
        int err = 0;
 
        mutex_lock(&spec->pcm_mutex);
-       if (!spec->indep_hp_enabled)
+       if (spec->indep_hp && !spec->indep_hp_enabled)
                err = -EBUSY;
        else
                spec->active_streams |= 1 << STREAM_INDEP_HP;
index 745535d1840a6713e802aaa8c1d475733b88720e..c38c68f579381d657786945baa3ab99b3ccae787 100644 (file)
@@ -867,7 +867,7 @@ static int azx_suspend(struct device *dev)
 
        chip = card->private_data;
        hda = container_of(chip, struct hda_intel, chip);
-       if (chip->disabled || hda->init_failed)
+       if (chip->disabled || hda->init_failed || !chip->running)
                return 0;
 
        bus = azx_bus(chip);
@@ -902,7 +902,7 @@ static int azx_resume(struct device *dev)
 
        chip = card->private_data;
        hda = container_of(chip, struct hda_intel, chip);
-       if (chip->disabled || hda->init_failed)
+       if (chip->disabled || hda->init_failed || !chip->running)
                return 0;
 
        if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
@@ -979,14 +979,16 @@ static int azx_runtime_resume(struct device *dev)
        if (!azx_has_pm_runtime(chip))
                return 0;
 
-       if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
-               && hda->need_i915_power) {
-               bus =  azx_bus(chip);
-               snd_hdac_display_power(bus, true);
-               haswell_set_bclk(hda);
-               /* toggle codec wakeup bit for STATESTS read */
-               snd_hdac_set_codec_wakeup(bus, true);
-               snd_hdac_set_codec_wakeup(bus, false);
+       if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
+               bus = azx_bus(chip);
+               if (hda->need_i915_power) {
+                       snd_hdac_display_power(bus, true);
+                       haswell_set_bclk(hda);
+               } else {
+                       /* toggle codec wakeup bit for STATESTS read */
+                       snd_hdac_set_codec_wakeup(bus, true);
+                       snd_hdac_set_codec_wakeup(bus, false);
+               }
        }
 
        /* Read STATESTS before controller reset */
@@ -1025,7 +1027,7 @@ static int azx_runtime_idle(struct device *dev)
                return 0;
 
        if (!power_save_controller || !azx_has_pm_runtime(chip) ||
-           azx_bus(chip)->codec_powered)
+           azx_bus(chip)->codec_powered || !chip->running)
                return -EBUSY;
 
        return 0;
@@ -2182,6 +2184,8 @@ static const struct pci_device_id azx_ids[] = {
        /* ATI HDMI */
        { PCI_DEVICE(0x1002, 0x1308),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0x157a),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0x793b),
          .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
        { PCI_DEVICE(0x1002, 0x7919),
@@ -2236,8 +2240,14 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0xaab0),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0xaac0),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0xaac8),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0xaad8),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0xaae8),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        /* VIA VT8251/VT8237A */
        { PCI_DEVICE(0x1106, 0x3288),
          .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
index 25ccf781fbe705cb5b2980814f7d4f90aff2d4a6..584a0343ab0cc132b7c2038679923b6617926cf7 100644 (file)
@@ -999,9 +999,7 @@ static void cs4210_spdif_automute(struct hda_codec *codec,
 
        spec->spdif_present = spdif_present;
        /* SPDIF TX on/off */
-       if (spdif_present)
-               snd_hda_set_pin_ctl(codec, spdif_pin,
-                                   spdif_present ? PIN_OUT : 0);
+       snd_hda_set_pin_ctl(codec, spdif_pin, spdif_present ? PIN_OUT : 0);
 
        cs_automute(codec);
 }
index 2f2433845d0487dd703301664cbc33ecfc77bc11..a97db5fc8a151aa43c0c99edc6ccf7efc0eaa52a 100644 (file)
@@ -3512,6 +3512,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
 { .id = 0x10de0070, .name = "GPU 70 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0071, .name = "GPU 71 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0072, .name = "GPU 72 HDMI/DP",  .patch = patch_nvhdmi },
+{ .id = 0x10de007d, .name = "GPU 7d HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de8001, .name = "MCP73 HDMI",      .patch = patch_nvhdmi_2ch },
 { .id = 0x11069f80, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
 { .id = 0x11069f81, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
@@ -3527,6 +3528,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
 { .id = 0x80862807, .name = "Haswell HDMI",    .patch = patch_generic_hdmi },
 { .id = 0x80862808, .name = "Broadwell HDMI",  .patch = patch_generic_hdmi },
 { .id = 0x80862809, .name = "Skylake HDMI",    .patch = patch_generic_hdmi },
+{ .id = 0x8086280a, .name = "Broxton HDMI",    .patch = patch_generic_hdmi },
 { .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi },
 { .id = 0x80862882, .name = "Valleyview2 HDMI",        .patch = patch_generic_hdmi },
 { .id = 0x80862883, .name = "Braswell HDMI",   .patch = patch_generic_hdmi },
@@ -3575,6 +3577,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0067");
 MODULE_ALIAS("snd-hda-codec-id:10de0070");
 MODULE_ALIAS("snd-hda-codec-id:10de0071");
 MODULE_ALIAS("snd-hda-codec-id:10de0072");
+MODULE_ALIAS("snd-hda-codec-id:10de007d");
 MODULE_ALIAS("snd-hda-codec-id:10de8001");
 MODULE_ALIAS("snd-hda-codec-id:11069f80");
 MODULE_ALIAS("snd-hda-codec-id:11069f81");
@@ -3591,6 +3594,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862806");
 MODULE_ALIAS("snd-hda-codec-id:80862807");
 MODULE_ALIAS("snd-hda-codec-id:80862808");
 MODULE_ALIAS("snd-hda-codec-id:80862809");
+MODULE_ALIAS("snd-hda-codec-id:8086280a");
 MODULE_ALIAS("snd-hda-codec-id:80862880");
 MODULE_ALIAS("snd-hda-codec-id:80862882");
 MODULE_ALIAS("snd-hda-codec-id:80862883");
index b3b44681d3cfbe90b2ff7bcbabba735dcfaac7f7..c456c04e0928d2c4ef9e65d114bdec3bf6b9332f 100644 (file)
@@ -2222,7 +2222,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF),
        SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
        SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
-       SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF),
+       SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
 
        SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
        SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
@@ -4441,6 +4441,55 @@ static void alc290_fixup_mono_speakers(struct hda_codec *codec,
        }
 }
 
+/* Hook to update amp GPIO4 for automute */
+static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
+                                         struct hda_jack_callback *jack)
+{
+       struct alc_spec *spec = codec->spec;
+
+       snd_hda_gen_hp_automute(codec, jack);
+       /* mute_led_polarity is set to 0, so we pass inverted value here */
+       alc_update_gpio_led(codec, 0x10, !spec->gen.hp_jack_present);
+}
+
+/* Manage GPIOs for HP EliteBook Folio 9480m.
+ *
+ * GPIO4 is the headphone amplifier power control
+ * GPIO3 is the audio output mute indicator LED
+ */
+
+static void alc280_fixup_hp_9480m(struct hda_codec *codec,
+                                 const struct hda_fixup *fix,
+                                 int action)
+{
+       struct alc_spec *spec = codec->spec;
+       static const struct hda_verb gpio_init[] = {
+               { 0x01, AC_VERB_SET_GPIO_MASK, 0x18 },
+               { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x18 },
+               {}
+       };
+
+       if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+               /* Set the hooks to turn the headphone amp on/off
+                * as needed
+                */
+               spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
+               spec->gen.hp_automute_hook = alc280_hp_gpio4_automute_hook;
+
+               /* The GPIOs are currently off */
+               spec->gpio_led = 0;
+
+               /* GPIO3 is connected to the output mute LED,
+                * high is on, low is off
+                */
+               spec->mute_led_polarity = 0;
+               spec->gpio_mute_led_mask = 0x08;
+
+               /* Initialize GPIO configuration */
+               snd_hda_add_verbs(codec, gpio_init);
+       }
+}
+
 /* for hda_fixup_thinkpad_acpi() */
 #include "thinkpad_helper.c"
 
@@ -4521,6 +4570,7 @@ enum {
        ALC286_FIXUP_HP_GPIO_LED,
        ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
        ALC280_FIXUP_HP_DOCK_PINS,
+       ALC280_FIXUP_HP_9480M,
        ALC288_FIXUP_DELL_HEADSET_MODE,
        ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
        ALC288_FIXUP_DELL_XPS_13_GPIO6,
@@ -5011,7 +5061,7 @@ static const struct hda_fixup alc269_fixups[] = {
                        { 0x14, 0x90170110 },
                        { 0x17, 0x40000008 },
                        { 0x18, 0x411111f0 },
-                       { 0x19, 0x411111f0 },
+                       { 0x19, 0x01a1913c },
                        { 0x1a, 0x411111f0 },
                        { 0x1b, 0x411111f0 },
                        { 0x1d, 0x40f89b2d },
@@ -5043,6 +5093,10 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC280_FIXUP_HP_GPIO4
        },
+       [ALC280_FIXUP_HP_9480M] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc280_fixup_hp_9480m,
+       },
        [ALC288_FIXUP_DELL_HEADSET_MODE] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_headset_mode_dell_alc288,
@@ -5131,6 +5185,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
+       SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
        SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -5161,6 +5216,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+       SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M),
        SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
        /* ALC290 */
@@ -5343,8 +5399,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {0x19, 0x411111f0}, \
        {0x1a, 0x411111f0}, \
        {0x1b, 0x411111f0}, \
-       {0x1d, 0x40700001}, \
-       {0x1e, 0x411111f0}, \
        {0x21, 0x02211020}
 
 #define ALC282_STANDARD_PINS \
@@ -5375,8 +5429,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {0x15, 0x0221401f}, \
        {0x1a, 0x411111f0}, \
        {0x1b, 0x411111f0}, \
-       {0x1d, 0x40700001}, \
-       {0x1e, 0x411111f0}
+       {0x1d, 0x40700001}
 
 #define ALC298_STANDARD_PINS \
        {0x18, 0x411111f0}, \
@@ -5407,6 +5460,39 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x17, 0x40000000},
                {0x1d, 0x40700001},
                {0x21, 0x02211030}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x40000000},
+               {0x14, 0x90170130},
+               {0x17, 0x411111f0},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x01014020},
+               {0x1d, 0x4054c029},
+               {0x1e, 0x411111f0},
+               {0x21, 0x0221103f}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x40000000},
+               {0x14, 0x90170150},
+               {0x17, 0x411111f0},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x02011020},
+               {0x1d, 0x4054c029},
+               {0x1e, 0x411111f0},
+               {0x21, 0x0221105f}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x40000000},
+               {0x14, 0x90170110},
+               {0x17, 0x411111f0},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x01014020},
+               {0x1d, 0x4054c029},
+               {0x1e, 0x411111f0},
+               {0x21, 0x0221101f}),
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x12, 0x90a60160},
                {0x14, 0x90170120},
@@ -5469,10 +5555,19 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x21, 0x02211030}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC256_STANDARD_PINS,
-               {0x13, 0x40000000}),
+               {0x13, 0x40000000},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC256_STANDARD_PINS,
+               {0x13, 0x411111f0},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC256_STANDARD_PINS,
-               {0x13, 0x411111f0}),
+               {0x13, 0x411111f0},
+               {0x1d, 0x4077992d},
+               {0x1e, 0x411111ff}),
        SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
                {0x12, 0x90a60130},
                {0x13, 0x40000000},
@@ -5635,35 +5730,48 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x13, 0x411111f0},
                {0x16, 0x01014020},
                {0x18, 0x411111f0},
-               {0x19, 0x01a19030}),
+               {0x19, 0x01a19030},
+               {0x1e, 0x411111f0}),
        SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
                ALC292_STANDARD_PINS,
                {0x12, 0x90a60140},
                {0x13, 0x411111f0},
                {0x16, 0x01014020},
                {0x18, 0x02a19031},
-               {0x19, 0x01a1903e}),
+               {0x19, 0x01a1903e},
+               {0x1e, 0x411111f0}),
        SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
                ALC292_STANDARD_PINS,
                {0x12, 0x90a60140},
                {0x13, 0x411111f0},
                {0x16, 0x411111f0},
                {0x18, 0x411111f0},
-               {0x19, 0x411111f0}),
+               {0x19, 0x411111f0},
+               {0x1e, 0x411111f0}),
        SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC292_STANDARD_PINS,
                {0x12, 0x40000000},
                {0x13, 0x90a60140},
                {0x16, 0x21014020},
                {0x18, 0x411111f0},
-               {0x19, 0x21a19030}),
+               {0x19, 0x21a19030},
+               {0x1e, 0x411111f0}),
        SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC292_STANDARD_PINS,
                {0x12, 0x40000000},
                {0x13, 0x90a60140},
                {0x16, 0x411111f0},
                {0x18, 0x411111f0},
-               {0x19, 0x411111f0}),
+               {0x19, 0x411111f0},
+               {0x1e, 0x411111f0}),
+       SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC292_STANDARD_PINS,
+               {0x12, 0x40000000},
+               {0x13, 0x90a60140},
+               {0x16, 0x21014020},
+               {0x18, 0x411111f0},
+               {0x19, 0x21a19030},
+               {0x1e, 0x411111ff}),
        SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC298_STANDARD_PINS,
                {0x12, 0x90a60130},
index dcc7fe91244c2dfada5c97fbb42df06dbd857bbf..9d947aef2c8b60b99f63fd9464ad289bef0c7061 100644 (file)
@@ -2920,7 +2920,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
        SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x148a,
                      "HP Mini", STAC_92HD83XXX_HP_LED),
        SND_PCI_QUIRK_VENDOR(PCI_VENDOR_ID_HP, "HP", STAC_92HD83XXX_HP),
-       SND_PCI_QUIRK(PCI_VENDOR_ID_TOSHIBA, 0xfa91,
+       /* match both 0xfa91 and 0xfa93 */
+       SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_TOSHIBA, 0xfffd, 0xfa91,
                      "Toshiba Satellite S50D", STAC_92HD83XXX_GPIO10_EAPD),
        {} /* terminator */
 };
index 477e13d309713e56a5a4416d054350185fea0007..e7ba557979cb2589439234d6bd0ddbbdc38985e5 100644 (file)
@@ -102,7 +102,7 @@ static int pcm1681_set_deemph(struct snd_soc_codec *codec)
 
        if (val != -1) {
                regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
-                                       PCM1681_DEEMPH_RATE_MASK, val);
+                                  PCM1681_DEEMPH_RATE_MASK, val << 3);
                enable = 1;
        } else
                enable = 0;
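
The added shift matters because regmap_update_bits() is a read-modify-write that only touches bits inside the mask, new = (old & ~mask) | (val & mask), so the value must already sit at the field's bit position. With an illustrative 2-bit mask at bits 4:3 (0x18) and rate code 2:

    /*  val = 2       ->  0x02 & 0x18 = 0x00   field never changes (bug) */
    /*  val = 2 << 3  ->  0x10 & 0x18 = 0x10   field updated       (fix) */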
index 9ce311e088fc514bf8cde70dd120a3074f78bfe5..e9cc3aae5366d30d0522b105ed11fdb88943948f 100644 (file)
@@ -2943,6 +2943,9 @@ static int rt5645_irq_detection(struct rt5645_priv *rt5645)
 {
        int val, btn_type, gpio_state = 0, report = 0;
 
+       if (!rt5645->codec)
+               return -EINVAL;
+
        switch (rt5645->pdata.jd_mode) {
        case 0: /* Not using rt5645 JD */
                if (rt5645->gpiod_hp_det) {
index bd7a344bf8c517edf69af2fda9e06e08488e47e0..1c317de2617623438bd169d9ba7cb89623139794 100644 (file)
 #define SGTL5000_BIAS_CTRL_MASK                        0x000e
 #define SGTL5000_BIAS_CTRL_SHIFT               1
 #define SGTL5000_BIAS_CTRL_WIDTH               3
-#define SGTL5000_SMALL_POP                     0
+#define SGTL5000_SMALL_POP                     1
 
 /*
  * SGTL5000_CHIP_MIC_CTRL
index 938d2cb6d78bee03e809f02462b17f951f0c953a..84a4f5ad80645f2f45079ea35fe8ce19da901f09 100644 (file)
@@ -315,7 +315,13 @@ static int ssm4567_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        if (invert_fclk)
                ctrl1 |= SSM4567_SAI_CTRL_1_FSYNC;
 
-       return regmap_write(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1, ctrl1);
+       return regmap_update_bits(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1,
+                       SSM4567_SAI_CTRL_1_BCLK |
+                       SSM4567_SAI_CTRL_1_FSYNC |
+                       SSM4567_SAI_CTRL_1_LJ |
+                       SSM4567_SAI_CTRL_1_TDM |
+                       SSM4567_SAI_CTRL_1_PDM,
+                       ctrl1);
 }
 
 static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
index c7647e066cfd76b07f6b1acd4d2a68ac04bae3bc..c0b940e2019f55f34bb53673a99a4594d7409ef3 100644 (file)
@@ -633,7 +633,7 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
                sub *= 100000;
                do_div(sub, freq);
 
-               if (sub < savesub) {
+               if (sub < savesub && !(i == 0 && psr == 0 && div2 == 0)) {
                        baudrate = tmprate;
                        savesub = sub;
                        pm = i;
index 3853ec2ddbc758d2c558dfcbe093d2cee59af8a1..6de5d5cd3280a92bf9d1b0ab71a5171e82a5bb0b 100644 (file)
@@ -7,4 +7,4 @@ obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += baytrail/
 obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += atom/
 
 # Machine support
-obj-$(CONFIG_SND_SOC_INTEL_SST) += boards/
+obj-$(CONFIG_SND_SOC) += boards/
index 620da1d1b9e3ea4fbde293c3e98fae4c30708ac1..0e0e4d9c021ff29bdf1f19eda2827ac459e4046c 100644 (file)
 #define MIN_FRAGMENT_SIZE (50 * 1024)
 #define MAX_FRAGMENT_SIZE (1024 * 1024)
 #define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz)  (((pcm_wd_sz + 15) >> 4) << 1)
+#ifdef CONFIG_PM
+#define GET_USAGE_COUNT(dev) (atomic_read(&dev->power.usage_count))
+#else
+#define GET_USAGE_COUNT(dev) 1
+#endif
 
 int free_stream_context(struct intel_sst_drv *ctx, unsigned int str_id)
 {
@@ -141,15 +146,9 @@ static int sst_power_control(struct device *dev, bool state)
        int ret = 0;
        int usage_count = 0;
 
-#ifdef CONFIG_PM
-       usage_count = atomic_read(&dev->power.usage_count);
-#else
-       usage_count = 1;
-#endif
-
        if (state == true) {
                ret = pm_runtime_get_sync(dev);
-
+               usage_count = GET_USAGE_COUNT(dev);
                dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count);
                if (ret < 0) {
                        dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret);
@@ -164,6 +163,7 @@ static int sst_power_control(struct device *dev, bool state)
                        }
                }
        } else {
+               usage_count = GET_USAGE_COUNT(dev);
                dev_dbg(ctx->dev, "Disable: pm usage count: %d\n", usage_count);
                return sst_pm_runtime_put(ctx);
        }
index d604ee80eda4be715601db603181e0291cc98747..70f832114a5aeffeeb7b0cc555f8f1b91bc40665 100644 (file)
@@ -69,12 +69,12 @@ static const struct snd_soc_dapm_route cht_audio_map[] = {
        {"Headphone", NULL, "HPR"},
        {"Ext Spk", NULL, "SPKL"},
        {"Ext Spk", NULL, "SPKR"},
-       {"AIF1 Playback", NULL, "ssp2 Tx"},
+       {"HiFi Playback", NULL, "ssp2 Tx"},
        {"ssp2 Tx", NULL, "codec_out0"},
        {"ssp2 Tx", NULL, "codec_out1"},
        {"codec_in0", NULL, "ssp2 Rx" },
        {"codec_in1", NULL, "ssp2 Rx" },
-       {"ssp2 Rx", NULL, "AIF1 Capture"},
+       {"ssp2 Rx", NULL, "HiFi Capture"},
 };
 
 static const struct snd_kcontrol_new cht_mc_controls[] = {
index 4d44b5803e55206c02006b9359449df377477b6c..2d2536af141fc9157b2ecb402a969af6c808faf6 100644 (file)
@@ -103,7 +103,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
                .name = "MAX98090 Playback",
                .stream_name = "MAX98090 Playback",
                .cpu_dai_name = "DL1",
-               .platform_name = "11220000.mt8173-afe-pcm",
                .codec_name = "snd-soc-dummy",
                .codec_dai_name = "snd-soc-dummy-dai",
                .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -114,7 +113,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
                .name = "MAX98090 Capture",
                .stream_name = "MAX98090 Capture",
                .cpu_dai_name = "VUL",
-               .platform_name = "11220000.mt8173-afe-pcm",
                .codec_name = "snd-soc-dummy",
                .codec_dai_name = "snd-soc-dummy-dai",
                .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -125,7 +123,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
        {
                .name = "Codec",
                .cpu_dai_name = "I2S",
-               .platform_name = "11220000.mt8173-afe-pcm",
                .no_pcm = 1,
                .codec_dai_name = "HiFi",
                .init = mt8173_max98090_init,
@@ -152,9 +149,21 @@ static struct snd_soc_card mt8173_max98090_card = {
 static int mt8173_max98090_dev_probe(struct platform_device *pdev)
 {
        struct snd_soc_card *card = &mt8173_max98090_card;
-       struct device_node *codec_node;
+       struct device_node *codec_node, *platform_node;
        int ret, i;
 
+       platform_node = of_parse_phandle(pdev->dev.of_node,
+                                        "mediatek,platform", 0);
+       if (!platform_node) {
+               dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
+               return -EINVAL;
+       }
+       for (i = 0; i < card->num_links; i++) {
+               if (mt8173_max98090_dais[i].platform_name)
+                       continue;
+               mt8173_max98090_dais[i].platform_of_node = platform_node;
+       }
+
        codec_node = of_parse_phandle(pdev->dev.of_node,
                                      "mediatek,audio-codec", 0);
        if (!codec_node) {
index 0940553230596aed2ba41787b8605e26c2cc11c7..6f52eca05e2600f34c5b76deb77db81ab6fb9290 100644 (file)
@@ -138,7 +138,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
                .name = "rt5650_rt5676 Playback",
                .stream_name = "rt5650_rt5676 Playback",
                .cpu_dai_name = "DL1",
-               .platform_name = "11220000.mt8173-afe-pcm",
                .codec_name = "snd-soc-dummy",
                .codec_dai_name = "snd-soc-dummy-dai",
                .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -149,7 +148,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
                .name = "rt5650_rt5676 Capture",
                .stream_name = "rt5650_rt5676 Capture",
                .cpu_dai_name = "VUL",
-               .platform_name = "11220000.mt8173-afe-pcm",
                .codec_name = "snd-soc-dummy",
                .codec_dai_name = "snd-soc-dummy-dai",
                .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -161,7 +159,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
        {
                .name = "Codec",
                .cpu_dai_name = "I2S",
-               .platform_name = "11220000.mt8173-afe-pcm",
                .no_pcm = 1,
                .codecs = mt8173_rt5650_rt5676_codecs,
                .num_codecs = 2,
@@ -209,7 +206,21 @@ static struct snd_soc_card mt8173_rt5650_rt5676_card = {
 static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev)
 {
        struct snd_soc_card *card = &mt8173_rt5650_rt5676_card;
-       int ret;
+       struct device_node *platform_node;
+       int i, ret;
+
+       platform_node = of_parse_phandle(pdev->dev.of_node,
+                                        "mediatek,platform", 0);
+       if (!platform_node) {
+               dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < card->num_links; i++) {
+               if (mt8173_rt5650_rt5676_dais[i].platform_name)
+                       continue;
+               mt8173_rt5650_rt5676_dais[i].platform_of_node = platform_node;
+       }
 
        mt8173_rt5650_rt5676_codecs[0].of_node =
                of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 0);
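Both MediaTek machine-driver probes above now resolve the AFE platform through a "mediatek,platform" phandle instead of the hard-coded device name "11220000.mt8173-afe-pcm", so the card no longer depends on the AFE's MMIO address. A minimal sketch of that lookup pattern follows; resolve_platform is a hypothetical helper name, not part of the patch:

    #include <linux/of.h>
    #include <linux/platform_device.h>
    #include <sound/soc.h>

    /* Hypothetical helper mirroring the two probes above: resolve the
     * phandle once, then attach it to every DAI link that does not
     * already name a platform explicitly. */
    static int resolve_platform(struct platform_device *pdev,
                                struct snd_soc_dai_link *links, int num)
    {
            struct device_node *np;
            int i;

            np = of_parse_phandle(pdev->dev.of_node, "mediatek,platform", 0);
            if (!np)
                    return -EINVAL; /* property missing or malformed */

            for (i = 0; i < num; i++)
                    if (!links[i].platform_name)
                            links[i].platform_of_node = np;
            return 0;
    }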
index cc228db5fb760eaa24f98d91b0b14f0eded80ea9..9863da73dfe03a0f057d9897fd89e7d4d1614194 100644
@@ -1199,6 +1199,8 @@ err_pm_disable:
 static int mtk_afe_pcm_dev_remove(struct platform_device *pdev)
 {
        pm_runtime_disable(&pdev->dev);
+       if (!pm_runtime_status_suspended(&pdev->dev))
+               mtk_afe_runtime_suspend(&pdev->dev);
        snd_soc_unregister_component(&pdev->dev);
        snd_soc_unregister_platform(&pdev->dev);
        return 0;
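The added check closes an unbind-time hole: pm_runtime_disable() stops the core from ever invoking the runtime-PM callbacks again, so if the AFE is still runtime-active the suspend handler must be called by hand to release its clocks exactly once. A hedged sketch of the shape; the foo_* names are placeholders:

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int foo_runtime_suspend(struct device *dev); /* placeholder */

    /* After pm_runtime_disable() no runtime-PM callback will run, so
     * balance the still-outstanding resume by hand, the same shape as
     * mtk_afe_pcm_dev_remove() above. */
    static int foo_remove(struct platform_device *pdev)
    {
            pm_runtime_disable(&pdev->dev);
            if (!pm_runtime_status_suspended(&pdev->dev))
                    foo_runtime_suspend(&pdev->dev);
            return 0;
    }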
index 3a4a5c0e3f9737c795f74367b04c9825462651f8..0e1e69c7abd56b25fab6e4ffb0eab5fd574d03d2 100644
@@ -1716,6 +1716,7 @@ card_probe_error:
        if (card->remove)
                card->remove(card);
 
+       snd_soc_dapm_free(&card->dapm);
        soc_cleanup_card_debugfs(card);
        snd_card_free(card->snd_card);
 
index aa327c92480c56c2609699380594a75ad765fbda..e0de8072c5144e92fce3ec9518787a4db526d726 100644
@@ -358,9 +358,10 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
                        data->widget =
                                snd_soc_dapm_new_control_unlocked(widget->dapm,
                                &template);
+                       kfree(name);
                        if (!data->widget) {
                                ret = -ENOMEM;
-                               goto err_name;
+                               goto err_data;
                        }
                }
                break;
@@ -389,11 +390,12 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
 
                        data->value = template.on_val;
 
-                       data->widget = snd_soc_dapm_new_control(widget->dapm,
-                                       &template);
+                       data->widget = snd_soc_dapm_new_control_unlocked(
+                                               widget->dapm, &template);
+                       kfree(name);
                        if (!data->widget) {
                                ret = -ENOMEM;
-                               goto err_name;
+                               goto err_data;
                        }
 
                        snd_soc_dapm_add_path(widget->dapm, data->widget,
@@ -408,8 +410,6 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
 
        return 0;
 
-err_name:
-       kfree(name);
 err_data:
        kfree(data);
        return ret;
@@ -418,8 +418,6 @@ err_data:
 static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
 {
        struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);
-       if (data->widget)
-               kfree(data->widget->name);
        kfree(data->wlist);
        kfree(data);
 }
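The relocated kfree(name) calls and the deletion in dapm_kcontrol_free() enforce one ownership rule: snd_soc_dapm_new_control_unlocked() makes its own kasprintf() copy of the widget name (visible in its hunk further down), so the caller frees its temporary immediately and the free path must never touch data->widget->name again. A standalone sketch of the callee-copies convention, with hypothetical names:

    #include <stdlib.h>
    #include <string.h>

    struct widget { char *name; };

    /* Callee-copies: the constructor duplicates the string, so the
     * caller frees its temporary right away and the widget's copy is
     * freed exactly once, by widget_free(). */
    static struct widget *widget_new(const char *name)
    {
            struct widget *w = calloc(1, sizeof(*w));

            if (!w)
                    return NULL;
            w->name = strdup(name);
            if (!w->name) {
                    free(w);
                    return NULL;
            }
            return w;
    }

    static void widget_free(struct widget *w)
    {
            if (w)
                    free(w->name);
            free(w);
    }

    int main(void)
    {
            char *tmp = strdup("SPK demo");          /* caller's temporary */
            struct widget *w = tmp ? widget_new(tmp) : NULL;

            free(tmp);      /* safe immediately after the call */
            widget_free(w); /* the only place w->name is freed */
            return 0;
    }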
@@ -1952,6 +1950,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
                                           size_t count, loff_t *ppos)
 {
        struct snd_soc_dapm_widget *w = file->private_data;
+       struct snd_soc_card *card = w->dapm->card;
        char *buf;
        int in, out;
        ssize_t ret;
@@ -1961,6 +1960,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
        if (!buf)
                return -ENOMEM;
 
+       mutex_lock(&card->dapm_mutex);
+
        /* Supply widgets are not handled by is_connected_{input,output}_ep() */
        if (w->is_supply) {
                in = 0;
@@ -2007,6 +2008,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
                                        p->sink->name);
        }
 
+       mutex_unlock(&card->dapm_mutex);
+
        ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
 
        kfree(buf);
@@ -2281,11 +2284,15 @@ static ssize_t dapm_widget_show(struct device *dev,
        struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
        int i, count = 0;
 
+       mutex_lock(&rtd->card->dapm_mutex);
+
        for (i = 0; i < rtd->num_codecs; i++) {
                struct snd_soc_codec *codec = rtd->codec_dais[i]->codec;
                count += dapm_widget_show_codec(codec, buf + count);
        }
 
+       mutex_unlock(&rtd->card->dapm_mutex);
+
        return count;
 }
 
@@ -3334,16 +3341,10 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
        }
 
        prefix = soc_dapm_prefix(dapm);
-       if (prefix) {
+       if (prefix)
                w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
-               if (widget->sname)
-                       w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix,
-                                            widget->sname);
-       } else {
+       else
                w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
-               if (widget->sname)
-                       w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname);
-       }
        if (w->name == NULL) {
                kfree(w);
                return NULL;
@@ -3792,7 +3793,7 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
                                break;
                        }
 
-                       if (!w->sname || !strstr(w->sname, dai_w->name))
+                       if (!w->sname || !strstr(w->sname, dai_w->sname))
                                continue;
 
                        if (dai_w->id == snd_soc_dapm_dai_in) {
index d0960683c4093c4303743b1e7e4f93190e11a3a5..59ac211f8fe7c273c84dcd67ac94bb27cb760564 100644
@@ -144,7 +144,7 @@ static const struct snd_soc_tplg_kcontrol_ops io_ops[] = {
        {SND_SOC_TPLG_CTL_STROBE, snd_soc_get_strobe,
                snd_soc_put_strobe, NULL},
        {SND_SOC_TPLG_DAPM_CTL_VOLSW, snd_soc_dapm_get_volsw,
-               snd_soc_dapm_put_volsw, NULL},
+               snd_soc_dapm_put_volsw, snd_soc_info_volsw},
        {SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE, snd_soc_dapm_get_enum_double,
                snd_soc_dapm_put_enum_double, snd_soc_info_enum_double},
        {SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT, snd_soc_dapm_get_enum_double,
@@ -580,27 +580,26 @@ static int soc_tplg_init_kcontrol(struct soc_tplg *tplg,
 }
 
 static int soc_tplg_create_tlv(struct soc_tplg *tplg,
-       struct snd_kcontrol_new *kc, u32 tlv_size)
+       struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_tlv *tplg_tlv)
 {
-       struct snd_soc_tplg_ctl_tlv *tplg_tlv;
        struct snd_ctl_tlv *tlv;
+       int size;
 
-       if (tlv_size == 0)
+       if (tplg_tlv->count == 0)
                return 0;
 
-       tplg_tlv = (struct snd_soc_tplg_ctl_tlv *) tplg->pos;
-       tplg->pos += tlv_size;
-
-       tlv = kzalloc(sizeof(*tlv) + tlv_size, GFP_KERNEL);
+       size = ((tplg_tlv->count + (sizeof(unsigned int) - 1)) &
+               ~(sizeof(unsigned int) - 1));
+       tlv = kzalloc(sizeof(*tlv) + size, GFP_KERNEL);
        if (tlv == NULL)
                return -ENOMEM;
 
        dev_dbg(tplg->dev, " created TLV type %d size %d bytes\n",
-               tplg_tlv->numid, tplg_tlv->size);
+               tplg_tlv->numid, size);
 
        tlv->numid = tplg_tlv->numid;
-       tlv->length = tplg_tlv->size;
-       memcpy(tlv->tlv, tplg_tlv + 1, tplg_tlv->size);
+       tlv->length = size;
+       memcpy(&tlv->tlv[0], tplg_tlv->data, size);
        kc->tlv.p = (void *)tlv;
 
        return 0;
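The new size computation rounds the TLV payload up to unsigned int alignment with the standard mask trick, i.e. the open-coded form of the kernel's ALIGN() macro. A tiny standalone illustration:

    #include <stdio.h>

    /* Round n up to the next multiple of a (a must be a power of two);
     * the same arithmetic as the soc_tplg_create_tlv() change above. */
    static unsigned int align_up(unsigned int n, unsigned int a)
    {
            return (n + a - 1) & ~(a - 1);
    }

    int main(void)
    {
            /* With 4-byte unsigned int: 5 -> 8, 8 -> 8, 9 -> 12. */
            printf("%u %u %u\n", align_up(5, 4), align_up(8, 4),
                   align_up(9, 4));
            return 0;
    }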
@@ -773,7 +772,7 @@ static int soc_tplg_dmixer_create(struct soc_tplg *tplg, unsigned int count,
                }
 
                /* create any TLV data */
-               soc_tplg_create_tlv(tplg, &kc, mc->hdr.tlv_size);
+               soc_tplg_create_tlv(tplg, &kc, &mc->tlv);
 
                /* register control here */
                err = soc_tplg_add_kcontrol(tplg, &kc,
index 98d96e1b17e05847aaa2b48740f3f9f0cc2c6e2a..1930c42e1f557ae62c30003b379088f242b0dbfe 100644
@@ -393,9 +393,9 @@ static int zx_i2s_probe(struct platform_device *pdev)
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        zx_i2s->mapbase = res->start;
        zx_i2s->reg_base = devm_ioremap_resource(&pdev->dev, res);
-       if (!zx_i2s->reg_base) {
+       if (IS_ERR(zx_i2s->reg_base)) {
                dev_err(&pdev->dev, "ioremap failed!\n");
-               return -EIO;
+               return PTR_ERR(zx_i2s->reg_base);
        }
 
        writel_relaxed(0, zx_i2s->reg_base + ZX_I2S_FIFO_CTRL);
index 11a0e46a1156913a23d9f31852fa508cbd1f8c5f..26265ce4caca17da4b6315a4627130ac63851063 100644
@@ -322,9 +322,9 @@ static int zx_spdif_probe(struct platform_device *pdev)
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        zx_spdif->mapbase = res->start;
        zx_spdif->reg_base = devm_ioremap_resource(&pdev->dev, res);
-       if (!zx_spdif->reg_base) {
+       if (IS_ERR(zx_spdif->reg_base)) {
                dev_err(&pdev->dev, "ioremap failed!\n");
-               return -EIO;
+               return PTR_ERR(zx_spdif->reg_base);
        }
 
        zx_spdif_dev_init(zx_spdif->reg_base);
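Both ZTE fixes correct the same API misuse: devm_ioremap_resource() never returns NULL; failures come back as ERR_PTR() values, so a !ptr test can never fire and the encoded errno should be propagated with PTR_ERR(). The canonical shape, as a sketch with a placeholder probe name:

    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    /* Placeholder probe showing the IS_ERR()/PTR_ERR() pattern that
     * both hunks above converge on. */
    static int foo_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            base = devm_ioremap_resource(&pdev->dev, res);
            if (IS_ERR(base))
                    return PTR_ERR(base); /* e.g. -EBUSY or -ENOMEM */

            /* ... program the hardware through base ... */
            return 0;
    }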
index 1b1a89e80d1394bb253aecc3cadc683dbcff3a36..784ceb85b2d9fe7cbe989d0c8443863f1d03df34 100644
@@ -956,6 +956,7 @@ static int snd_amd7930_create(struct snd_card *card,
        if (!amd->regs) {
                snd_printk(KERN_ERR
                           "amd7930-%d: Unable to map chip registers.\n", dev);
+               kfree(amd);
                return -EIO;
        }
 
index 8461d6bf992f8d39f709097be8305fc14efbb506..204cc074adb96f8f99ebd9d69bd7e1e4cf91f98e 100644
@@ -186,12 +186,8 @@ static int line6_stream_start(struct snd_line6_pcm *line6pcm, int direction,
        int ret = 0;
 
        spin_lock_irqsave(&pstr->lock, flags);
-       if (!test_and_set_bit(type, &pstr->running)) {
-               if (pstr->active_urbs || pstr->unlink_urbs) {
-                       ret = -EBUSY;
-                       goto error;
-               }
-
+       if (!test_and_set_bit(type, &pstr->running) &&
+           !(pstr->active_urbs || pstr->unlink_urbs)) {
                pstr->count = 0;
                /* Submit all currently available URBs */
                if (direction == SNDRV_PCM_STREAM_PLAYBACK)
@@ -199,7 +195,6 @@ static int line6_stream_start(struct snd_line6_pcm *line6pcm, int direction,
                else
                        ret = line6_submit_audio_in_all_urbs(line6pcm);
        }
- error:
        if (ret < 0)
                clear_bit(type, &pstr->running);
        spin_unlock_irqrestore(&pstr->lock, flags);
index e5000da9e9d7093f6e287194665de2d63f046e93..6a803eff87f71110049d9c39cb07025d2c64d828 100644
@@ -341,6 +341,20 @@ static const struct usbmix_name_map scms_usb3318_map[] = {
        { 0 }
 };
 
+/* Bose Companion 5: the dB conversion factor is 16 instead of 256 */
+static struct usbmix_dB_map bose_companion5_dB = {-5006, -6};
+static struct usbmix_name_map bose_companion5_map[] = {
+       { 3, NULL, .dB = &bose_companion5_dB },
+       { 0 }   /* terminator */
+};
+
+/* Dragonfly DAC 1.2: the dB conversion factor is 1 instead of 256 */
+static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000};
+static struct usbmix_name_map dragonfly_1_2_map[] = {
+       { 7, NULL, .dB = &dragonfly_1_2_dB },
+       { 0 }   /* terminator */
+};
+
 /*
  * Control map entries
  */
@@ -451,6 +465,16 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
                .id = USB_ID(0x25c4, 0x0003),
                .map = scms_usb3318_map,
        },
+       {
+               /* Bose Companion 5 */
+               .id = USB_ID(0x05a7, 0x1020),
+               .map = bose_companion5_map,
+       },
+       {
+               /* Dragonfly DAC 1.2 */
+               .id = USB_ID(0x21b4, 0x0081),
+               .map = dragonfly_1_2_map,
+       },
        { 0 } /* terminator */
 };
 
index 2f6d3e9a1bcd0144cf81d77f3ec085af3984d07e..e4756651a52c8873457ae340450934dcfbd5dfad 100644
@@ -2512,6 +2512,74 @@ YAMAHA_DEVICE(0x7010, "UB99"),
        }
 },
 
+/* Steinberg devices */
+{
+       /* Steinberg MI2 */
+       USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x2040),
+       .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = & (const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 0,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 1,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 3,
+                               .type = QUIRK_MIDI_FIXED_ENDPOINT,
+                               .data = &(const struct snd_usb_midi_endpoint_info) {
+                                       .out_cables = 0x0001,
+                                       .in_cables  = 0x0001
+                               }
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
+{
+       /* Steinberg MI4 */
+       USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x4040),
+       .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = & (const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 0,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 1,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 3,
+                               .type = QUIRK_MIDI_FIXED_ENDPOINT,
+                               .data = &(const struct snd_usb_midi_endpoint_info) {
+                                       .out_cables = 0x0001,
+                                       .in_cables  = 0x0001
+                               }
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
+
 /* TerraTec devices */
 {
        USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0012),
index 8bd9606584632582f7e893c0a3c01e8f0240678f..fe1b02c2c95bbe1274376f1b20ffebfac7358cc7 100644
@@ -36,7 +36,7 @@ $(LIBFILE): $(API_IN)
 
 clean:
        $(call QUIET_CLEAN, libapi) $(RM) $(LIBFILE); \
-       find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o | xargs $(RM)
+       find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
 
 FORCE:
 
diff --git a/tools/lib/hweight.c b/tools/lib/hweight.c
new file mode 100644
index 0000000..0b859b8
--- /dev/null
+++ b/tools/lib/hweight.c
@@ -0,0 +1,62 @@
+#include <linux/bitops.h>
+#include <asm/types.h>
+
+/**
+ * hweightN - returns the Hamming weight of an N-bit word
+ * @w: the word to weigh
+ *
+ * The Hamming weight of a number is the total number of bits set in it.
+ */
+
+unsigned int __sw_hweight32(unsigned int w)
+{
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
+       w -= (w >> 1) & 0x55555555;
+       w =  (w & 0x33333333) + ((w >> 2) & 0x33333333);
+       w =  (w + (w >> 4)) & 0x0f0f0f0f;
+       return (w * 0x01010101) >> 24;
+#else
+       unsigned int res = w - ((w >> 1) & 0x55555555);
+       res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
+       res = (res + (res >> 4)) & 0x0F0F0F0F;
+       res = res + (res >> 8);
+       return (res + (res >> 16)) & 0x000000FF;
+#endif
+}
+
+unsigned int __sw_hweight16(unsigned int w)
+{
+       unsigned int res = w - ((w >> 1) & 0x5555);
+       res = (res & 0x3333) + ((res >> 2) & 0x3333);
+       res = (res + (res >> 4)) & 0x0F0F;
+       return (res + (res >> 8)) & 0x00FF;
+}
+
+unsigned int __sw_hweight8(unsigned int w)
+{
+       unsigned int res = w - ((w >> 1) & 0x55);
+       res = (res & 0x33) + ((res >> 2) & 0x33);
+       return (res + (res >> 4)) & 0x0F;
+}
+
+unsigned long __sw_hweight64(__u64 w)
+{
+#if BITS_PER_LONG == 32
+       return __sw_hweight32((unsigned int)(w >> 32)) +
+              __sw_hweight32((unsigned int)w);
+#elif BITS_PER_LONG == 64
+#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
+       w -= (w >> 1) & 0x5555555555555555ul;
+       w =  (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
+       w =  (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
+       return (w * 0x0101010101010101ul) >> 56;
+#else
+       __u64 res = w - ((w >> 1) & 0x5555555555555555ul);
+       res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
+       res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
+       res = res + (res >> 8);
+       res = res + (res >> 16);
+       return (res + (res >> 32)) & 0x00000000000000FFul;
+#endif
+#endif
+}
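The fast path of the new file is the classic SWAR ("SIMD within a register") population count: fold the word into 2-, 4- and 8-bit partial sums, then add the four byte counts with a single multiply. A self-contained userspace check of the same steps:

    #include <stdio.h>

    /* Mirrors __sw_hweight32() above: each step widens the per-field
     * counts until every byte holds its own popcount; the multiply by
     * 0x01010101 then sums all four bytes into the top byte. */
    static unsigned int popcount32(unsigned int w)
    {
            w -= (w >> 1) & 0x55555555;                      /* 2-bit sums */
            w  = (w & 0x33333333) + ((w >> 2) & 0x33333333); /* 4-bit sums */
            w  = (w + (w >> 4)) & 0x0f0f0f0f;                /* 8-bit sums */
            return (w * 0x01010101) >> 24;                   /* byte total */
    }

    int main(void)
    {
            printf("%u\n", popcount32(0xf0f00001u)); /* prints 9 */
            return 0;
    }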
index 6daaff652affdde16240c91330781864aaedfd35..7851df1490e0a81f6a17776bbdb464f564a99894 100644
@@ -268,7 +268,7 @@ install: install_lib
 
 clean:
        $(call QUIET_CLEAN, libtraceevent) \
-               $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d \
+               $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d .*.cmd \
                $(RM) TRACEEVENT-CFLAGS tags TAGS
 
 PHONY += force plugins
index 618c2bcd4eabc6143b0e7f0431f57b8620101fe5..2cd3d4c997383af3fde9ed090eb5d9ab32a7e467 100644
 #include <string.h>
 #include <bfd.h>
 #include <dis-asm.h>
+#include <regex.h>
+#include <fcntl.h>
 #include <sys/klog.h>
 #include <sys/types.h>
-#include <regex.h>
+#include <sys/stat.h>
+
+#define CMD_ACTION_SIZE_BUFFER         10
+#define CMD_ACTION_READ_ALL            3
 
 static void get_exec_path(char *tpath, size_t size)
 {
@@ -87,20 +92,66 @@ static void get_asm_insns(uint8_t *image, size_t len, int opcodes)
        bfd_close(bfdf);
 }
 
-static char *get_klog_buff(int *klen)
+static char *get_klog_buff(unsigned int *klen)
 {
-       int ret, len = klogctl(10, NULL, 0);
-       char *buff = malloc(len);
+       int ret, len;
+       char *buff;
+
+       len = klogctl(CMD_ACTION_SIZE_BUFFER, NULL, 0);
+       buff = malloc(len);
+       if (!buff)
+               return NULL;
+
+       ret = klogctl(CMD_ACTION_READ_ALL, buff, len);
+       if (ret < 0) {
+               free(buff);
+               return NULL;
+       }
 
-       assert(buff && klen);
-       ret = klogctl(3, buff, len);
-       assert(ret >= 0);
        *klen = ret;
+       return buff;
+}
 
+static char *get_flog_buff(const char *file, unsigned int *klen)
+{
+       int fd, ret, len;
+       struct stat fi;
+       char *buff;
+
+       fd = open(file, O_RDONLY);
+       if (fd < 0)
+               return NULL;
+
+       ret = fstat(fd, &fi);
+       if (ret < 0 || !S_ISREG(fi.st_mode))
+               goto out;
+
+       len = fi.st_size + 1;
+       buff = malloc(len);
+       if (!buff)
+               goto out;
+
+       memset(buff, 0, len);
+       ret = read(fd, buff, len - 1);
+       if (ret <= 0)
+               goto out_free;
+
+       close(fd);
+       *klen = ret;
        return buff;
+out_free:
+       free(buff);
+out:
+       close(fd);
+       return NULL;
+}
+
+static char *get_log_buff(const char *file, unsigned int *klen)
+{
+       return file ? get_flog_buff(file, klen) : get_klog_buff(klen);
 }
 
-static void put_klog_buff(char *buff)
+static void put_log_buff(char *buff)
 {
        free(buff);
 }
@@ -138,8 +189,10 @@ static int get_last_jit_image(char *haystack, size_t hlen,
        ptr = haystack + off - (pmatch[0].rm_eo - pmatch[0].rm_so);
        ret = sscanf(ptr, "flen=%d proglen=%d pass=%d image=%lx",
                     &flen, &proglen, &pass, &base);
-       if (ret != 4)
+       if (ret != 4) {
+               regfree(&regex);
                return 0;
+       }
 
        tmp = ptr = haystack + off;
        while ((ptr = strtok(tmp, "\n")) != NULL && ulen < ilen) {
@@ -169,31 +222,49 @@ static int get_last_jit_image(char *haystack, size_t hlen,
        return ulen;
 }
 
+static void usage(void)
+{
+       printf("Usage: bpf_jit_disasm [...]\n");
+       printf("       -o          Also display related opcodes (default: off).\n");
+       printf("       -f <file>   Read last image dump from file or stdin (default: klog).\n");
+       printf("       -h          Display this help.\n");
+}
+
 int main(int argc, char **argv)
 {
-       int len, klen, opcodes = 0;
-       char *kbuff;
+       unsigned int len, klen, opt, opcodes = 0;
        static uint8_t image[32768];
+       char *kbuff, *file = NULL;
 
-       if (argc > 1) {
-               if (!strncmp("-o", argv[argc - 1], 2)) {
+       while ((opt = getopt(argc, argv, "of:")) != -1) {
+               switch (opt) {
+               case 'o':
                        opcodes = 1;
-               } else {
-                       printf("usage: bpf_jit_disasm [-o: show opcodes]\n");
-                       exit(0);
+                       break;
+               case 'f':
+                       file = optarg;
+                       break;
+               default:
+                       usage();
+                       return -1;
                }
        }
 
        bfd_init();
        memset(image, 0, sizeof(image));
 
-       kbuff = get_klog_buff(&klen);
+       kbuff = get_log_buff(file, &klen);
+       if (!kbuff) {
+               fprintf(stderr, "Could not retrieve log buffer!\n");
+               return -1;
+       }
 
        len = get_last_jit_image(kbuff, klen, image, sizeof(image));
        if (len > 0)
                get_asm_insns(image, len, opcodes);
+       else
+               fprintf(stderr, "No JIT image found!\n");
 
-       put_klog_buff(kbuff);
-
+       put_log_buff(kbuff);
        return 0;
 }
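The magic numbers fed to klogctl() now carry names: per syslog(2), command 10 (SYSLOG_ACTION_SIZE_BUFFER) returns the kernel ring-buffer size and command 3 (SYSLOG_ACTION_READ_ALL) reads the whole buffer without clearing it. glibc exports klogctl() but not the constants, hence the local defines. A minimal standalone reader (needs CAP_SYSLOG or a permissive dmesg_restrict):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/klog.h>

    /* Names per syslog(2); glibc's <sys/klog.h> does not define them. */
    #define SYSLOG_ACTION_READ_ALL    3
    #define SYSLOG_ACTION_SIZE_BUFFER 10

    int main(void)
    {
            int len = klogctl(SYSLOG_ACTION_SIZE_BUFFER, NULL, 0);
            char *buf;
            int n;

            if (len <= 0 || !(buf = malloc(len)))
                    return 1;
            n = klogctl(SYSLOG_ACTION_READ_ALL, buf, len);
            if (n > 0)
                    fwrite(buf, 1, n, stdout);
            free(buf);
            return n < 0;
    }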
index 09dc0aabb5154cb0906b6299afed4a3d0fc009c8..d01a0aad5a01b7e2e5c8d0f45d90c72f9e713574 100644
@@ -18,6 +18,7 @@ tools/arch/x86/include/asm/atomic.h
 tools/arch/x86/include/asm/rmwcc.h
 tools/lib/traceevent
 tools/lib/api
+tools/lib/hweight.c
 tools/lib/rbtree.c
 tools/lib/symbol/kallsyms.c
 tools/lib/symbol/kallsyms.h
@@ -57,7 +58,6 @@ include/linux/perf_event.h
 include/linux/list.h
 include/linux/hash.h
 include/linux/stringify.h
-lib/hweight.c
 include/linux/swab.h
 arch/*/include/asm/unistd*.h
 arch/*/include/uapi/asm/unistd*.h
index 7a4b549214e34715aaf2ff7b834598ef2bae8080..bba34636b7334b334bdb520d3ebd8925f86f84c0 100644
@@ -109,9 +109,22 @@ $(OUTPUT)PERF-VERSION-FILE: ../../.git/HEAD
        $(Q)$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
        $(Q)touch $(OUTPUT)PERF-VERSION-FILE
 
-CC = $(CROSS_COMPILE)gcc
-LD ?= $(CROSS_COMPILE)ld
-AR = $(CROSS_COMPILE)ar
+# Makefiles suck: This macro sets a default value of $(2) for the
+# variable named by $(1), unless the variable has been set by
+# environment or command line. This is necessary for CC and AR
+# because make sets default values, so the simpler ?= approach
+# won't work as expected.
+define allow-override
+  $(if $(or $(findstring environment,$(origin $(1))),\
+            $(findstring command line,$(origin $(1)))),,\
+    $(eval $(1) = $(2)))
+endef
+
+# Allow setting CC, AR and LD, or setting CROSS_COMPILE as a prefix.
+$(call allow-override,CC,$(CROSS_COMPILE)gcc)
+$(call allow-override,AR,$(CROSS_COMPILE)ar)
+$(call allow-override,LD,$(CROSS_COMPILE)ld)
+
 PKG_CONFIG = $(CROSS_COMPILE)pkg-config
 
 RM      = rm -f
index 37e301a32f437eb6004ba4f4ccc6b90c73b9aa5c..d99d850e1444c9751cdbf7e0552b314e26597fef 100644
@@ -343,7 +343,7 @@ static int read_counter(struct perf_evsel *counter)
        return 0;
 }
 
-static void read_counters(bool close)
+static void read_counters(bool close_counters)
 {
        struct perf_evsel *counter;
 
@@ -354,7 +354,7 @@ static void read_counters(bool close)
                if (process_counter(counter))
                        pr_warning("failed to process counter %s\n", counter->name);
 
-               if (close) {
+               if (close_counters) {
                        perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
                                             thread_map__nr(evsel_list->threads));
                }
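The rename is defensive: a parameter called "close" shadows close(2) inside the function body, so any later attempt to close a descriptor there would fail to compile. A small demonstration of the trap avoided, with hypothetical names:

    #include <fcntl.h>
    #include <unistd.h>

    /* Had this parameter been named `close`, the close(fd) call below
     * would not compile -- the int would shadow close(2). The hunk
     * above dodges the same trap by renaming to close_counters. */
    static void drain(int fd, int close_fd)
    {
            char buf[64];

            while (read(fd, buf, sizeof(buf)) > 0)
                    ;               /* discard */
            if (close_fd)
                    close(fd);
    }

    int main(void)
    {
            int fd = open("/dev/null", O_RDONLY);

            if (fd >= 0)
                    drain(fd, 1);
            return 0;
    }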
index 7629bef2fd791b41f64e49da53bc895e62d3dab4..fa67613976a80147e672bd2b960c9f577ff06b74 100644
@@ -48,7 +48,7 @@ static struct rb_node *hists__filter_entries(struct rb_node *nd,
 
 static bool hist_browser__has_filter(struct hist_browser *hb)
 {
-       return hists__has_filter(hb->hists) || hb->min_pcnt;
+       return hists__has_filter(hb->hists) || hb->min_pcnt || symbol_conf.has_filter;
 }
 
 static int hist_browser__get_folding(struct hist_browser *browser)
index 601d11440596dfd2b1d5244f56e972487d3fecff..d2d318c59b379531903f775abd9f842a02e01ca8 100644
@@ -143,6 +143,6 @@ $(OUTPUT)util/rbtree.o: ../lib/rbtree.c FORCE
        $(call rule_mkdir)
        $(call if_changed_dep,cc_o_c)
 
-$(OUTPUT)util/hweight.o: ../../lib/hweight.c FORCE
+$(OUTPUT)util/hweight.o: ../lib/hweight.c FORCE
        $(call rule_mkdir)
        $(call if_changed_dep,cc_o_c)
index 7e7405c9b9361638f649820ae369bdec64e89c20..83d9dd96fe08ea8c613d1bda4db9b00bf4d04cd9 100644
@@ -53,11 +53,6 @@ int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
 {
        struct perf_event_mmap_page *pc = userpg;
 
-#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
-       pr_err("Cannot use AUX area tracing mmaps\n");
-       return -1;
-#endif
-
        WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");
 
        mm->userpg = userpg;
@@ -73,6 +68,11 @@ int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
                return 0;
        }
 
+#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
+       pr_err("Cannot use AUX area tracing mmaps\n");
+       return -1;
+#endif
+
        pc->aux_offset = mp->offset;
        pc->aux_size = mp->len;
 
index e23ded40c79e3ecd62642bf2a07a34c5fc7662f8..0766d98c5da59dd1aa44c41aaee1791502ac9a0d 100644
@@ -10,7 +10,7 @@ util/ctype.c
 util/evlist.c
 util/evsel.c
 util/cpumap.c
-../../lib/hweight.c
+../lib/hweight.c
 util/thread_map.c
 util/util.c
 util/xyarray.c
@@ -19,5 +19,5 @@ util/rblist.c
 util/stat.c
 util/strlist.c
 util/trace-event.c
-../../lib/rbtree.c
+../lib/rbtree.c
 util/string.c
index 48b588c6951a9476246d6c71a9ee7b8535a8be37..60f11414bb5c8c6b463980f7b767662654e0331d 100644
@@ -1911,6 +1911,8 @@ int setup_list(struct strlist **list, const char *list_str,
                pr_err("problems parsing %s list\n", list_name);
                return -1;
        }
+
+       symbol_conf.has_filter = true;
        return 0;
 }
 
index bef47ead1d9bd1efc5e9620f04b13714286ff616..b98ce51af1422f8e22415d6a104b4a19810940cc 100644
@@ -105,7 +105,8 @@ struct symbol_conf {
                        demangle_kernel,
                        filter_relative,
                        show_hist_headers,
-                       branch_callstack;
+                       branch_callstack,
+                       has_filter;
        const char      *vmlinux_name,
                        *kallsyms_name,
                        *source_prefix,
index da7646d767feba14c4f1ad3ba60f3758d5f89413..292ae2c90e063d958da5f5859787c4c1c70ccab5 100644
@@ -136,8 +136,7 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
                if (grow) {
                        struct thread_map *tmp;
 
-                       tmp = realloc(threads, (sizeof(*threads) +
-                                               max_threads * sizeof(pid_t)));
+                       tmp = thread_map__realloc(threads, max_threads);
                        if (tmp == NULL)
                                goto out_free_namelist;
 
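thread_map ends in a flexible pid_t array, so growing it means reallocating header plus n entries in one block; the patch wraps that in thread_map__realloc() and, as before, assigns through a temporary so the original pointer survives a failed realloc(). A hedged userspace sketch of the same idiom; pid_map is a stand-in type:

    #include <stdlib.h>
    #include <sys/types.h>

    struct pid_map {
            int nr;
            pid_t map[];            /* flexible array member */
    };

    /* Size = header + nr entries; return the new block and let the
     * caller commit it, so nothing is leaked when realloc() fails. */
    static struct pid_map *pid_map__realloc(struct pid_map *m, int nr)
    {
            return realloc(m, sizeof(*m) + (size_t)nr * sizeof(pid_t));
    }

    int main(void)
    {
            struct pid_map *map = NULL, *tmp;

            tmp = pid_map__realloc(map, 8);
            if (tmp)
                    map = tmp;      /* only commit on success */
            free(map);
            return 0;
    }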
index 4b89118f158db458ae29cca6d20b093d81392cf6..44d440da15dcf73f97a81518c97a33be5c32b89b 100644
@@ -236,18 +236,16 @@ static struct dso *__machine__findnew_compat(struct machine *machine,
        const char *file_name;
        struct dso *dso;
 
-       pthread_rwlock_wrlock(&machine->dsos.lock);
        dso = __dsos__find(&machine->dsos, vdso_file->dso_name, true);
        if (dso)
-               goto out_unlock;
+               goto out;
 
        file_name = vdso__get_compat_file(vdso_file);
        if (!file_name)
-               goto out_unlock;
+               goto out;
 
        dso = __machine__addnew_vdso(machine, vdso_file->dso_name, file_name);
-out_unlock:
-       pthread_rwlock_unlock(&machine->dsos.lock);
+out:
        return dso;
 }
 
index 7f0c756993af15a0b5cb99ba50dd6f7d0dfca226..3d7dc6afc3f8f9459ca407ee5a16b1b2c9a8119b 100644
@@ -191,7 +191,7 @@ int main(int argc, char *argv[])
                if (res > 0) {
                        atomic_set(&requeued, 1);
                        break;
-               } else if (res > 0) {
+               } else if (res < 0) {
                        error("FUTEX_CMP_REQUEUE_PI failed\n", errno);
                        ret = RET_ERROR;
                        break;
index 620e37f741b868a231a414a71511cd8872704a3c..1dd087da6f31ae2f38c70042213ee6e5159cee10 100644
@@ -155,6 +155,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
                list_add_tail(&kvg->node, &kv->group_list);
                kvg->vfio_group = vfio_group;
 
+               kvm_arch_start_assignment(dev->kvm);
+
                mutex_unlock(&kv->lock);
 
                kvm_vfio_update_coherency(dev);
@@ -190,6 +192,8 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
                        break;
                }
 
+               kvm_arch_end_assignment(dev->kvm);
+
                mutex_unlock(&kv->lock);
 
                kvm_vfio_group_put_external_user(vfio_group);
@@ -239,6 +243,7 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
                kvm_vfio_group_put_external_user(kvg->vfio_group);
                list_del(&kvg->node);
                kfree(kvg);
+               kvm_arch_end_assignment(dev->kvm);
        }
 
        kvm_vfio_update_coherency(dev);
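Taken together, the three hunks keep a per-VM assigned-device count balanced: one kvm_arch_start_assignment() when a VFIO group is attached, one kvm_arch_end_assignment() when a group is deleted, and one more per group still attached when the device is destroyed. The invariant in miniature, with hypothetical names:

    #include <assert.h>

    /* Toy model of the pairing rule: every start has exactly one
     * matching end, including the ends issued on the destroy path. */
    static int assigned;

    static void start_assignment(void) { assigned++; }

    static void end_assignment(void)
    {
            assert(assigned > 0);   /* an unbalanced end trips here */
            assigned--;
    }

    int main(void)
    {
            start_assignment();     /* group A attached */
            start_assignment();     /* group B attached */
            end_assignment();       /* group A deleted explicitly */
            end_assignment();       /* destroy path drains group B */
            assert(assigned == 0);
            return 0;
    }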